diff --git a/CHANGELOG.md b/CHANGELOG.md
index c65b0e6cb52..48f955548fc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,9 @@
 * [CHANGE] Ingester: remove experimental flags `-ingest-storage.kafka.ongoing-records-per-fetch` and `-ingest-storage.kafka.startup-records-per-fetch`. They are removed in favour of `-ingest-storage.kafka.max-buffered-bytes`. #9906
 * [CHANGE] Ingester: Replace `cortex_discarded_samples_total` label from `sample-out-of-bounds` to `sample-timestamp-too-old`. #9885
 * [CHANGE] Ruler: the `/prometheus/config/v1/rules` does not return an error anymore if a rule group is missing in the object storage after been successfully returned by listing the storage, because it could have been deleted in the meanwhile. #9936
+* [CHANGE] Querier: The `.` pattern in regular expressions in PromQL matches newline characters. With this change, regular expressions like `.*` match strings that include `\n`. To maintain the old behaviour, you will have to change regular expressions by replacing all `.` patterns with `[^\n]`, e.g. `foo[^\n]*`. This upgrades PromQL compatibility from Prometheus 2.0 to 3.0. #9844
+* [CHANGE] Querier: Lookback and range selectors are left open and right closed (previously left closed and right closed). This change affects queries when the evaluation time perfectly aligns with the sample timestamps. For example, assume you are querying a time series with evenly spaced samples exactly 1 minute apart. Previously, a range query with `5m` would usually return 5 samples, or 6 samples if the query evaluation aligned perfectly with a scrape. Now, queries like this always return 5 samples. This upgrades PromQL compatibility from Prometheus 2.0 to 3.0. #9844
+* [CHANGE] Querier: PromQL (native histograms): Introduce exponential interpolation. #9844
 * [FEATURE] Querier: add experimental streaming PromQL engine, enabled with `-querier.query-engine=mimir`. #9367 #9368 #9398 #9399 #9403 #9417 #9418 #9419 #9420 #9482 #9504 #9505 #9507 #9518 #9531 #9532 #9533 #9553 #9558 #9588 #9589 #9639 #9641 #9642 #9651 #9664 #9681 #9717 #9719 #9724 #9874 #9998 #10007 #10010
 * [FEATURE] Distributor: Add support for `lz4` OTLP compression. #9763
 * [FEATURE] Query-frontend: added experimental configuration options `query-frontend.cache-errors` and `query-frontend.results-cache-ttl-for-errors` to allow non-transient responses to be cached. When set to `true` error responses from hitting limits or bad data are cached for a short TTL. #9028
@@ -39,6 +42,7 @@
 * [FEATURE] Ruler: Add experimental support for caching the contents of rule groups. This is disabled by default and can be enabled by setting `-ruler-storage.cache.rule-group-enabled`. #9595 #10024
 * [FEATURE] PromQL: Add experimental `info` function. Experimental functions are disabled by default, but can be enabled setting `-querier.promql-experimental-functions-enabled=true` in the query-frontend and querier. #9879
 * [FEATURE] Distributor: Support promotion of OTel resource attributes to labels. #8271
+* [FEATURE] Querier: Add experimental `double_exponential_smoothing` PromQL function. Experimental functions are disabled by default, but can be enabled by setting `-querier.promql-experimental-functions-enabled=true` in the query-frontend and querier. #9844
 * [ENHANCEMENT] Query Frontend: Return server-side `bytes_processed` statistics following Server-Timing format. #9645 #9985
 * [ENHANCEMENT] mimirtool: Adds bearer token support for mimirtool's analyze ruler/prometheus commands. #9587
 * [ENHANCEMENT] Ruler: Support `exclude_alerts` parameter in `/api/v1/rules` endpoint. #9300
@@ -70,6 +74,7 @@
 * [ENHANCEMENT] Ingester: `-ingest-storage.kafka.max-buffered-bytes` to limit the memory for buffered records when using concurrent fetching. #9892
 * [ENHANCEMENT] Querier: improve performance and memory consumption of queries that select many series. #9914
 * [ENHANCEMENT] Ruler: Support OAuth2 and proxies in Alertmanager client #9945
+* [ENHANCEMENT] Ingester: Build 24h blocks for older out-of-order (OOO) data. #9844
 * [BUGFIX] Fix issue where functions such as `rate()` over native histograms could return incorrect values if a float stale marker was present in the selected range. #9508
 * [BUGFIX] Fix issue where negation of native histograms (eg. `-some_native_histogram_series`) did nothing. #9508
 * [BUGFIX] Fix issue where `metric might not be a counter, name does not end in _total/_sum/_count/_bucket` annotation would be emitted even if `rate` or `increase` did not have enough samples to compute a result. #9508
@@ -85,6 +90,11 @@
 * [BUGFIX] Ingester: Fix data loss bug in the experimental ingest storage when a Kafka Fetch is split into multiple requests and some of them return an error. #9963 #9964
 * [BUGFIX] PromQL: `round` now removes the metric name again. #9879
 * [BUGFIX] Query-Frontend: fix `QueryFrontendCodec` module initialization to set lookback delta from `-querier.lookback-delta`. #9984
+* [BUGFIX] OTLP: Support integer exemplar value type. #9844
+* [BUGFIX] Querier: Correct the behaviour of binary operators between native histograms and floats. #9844
+* [BUGFIX] Querier: Fix stddev+stdvar aggregations to always ignore native histograms. #9844
+* [BUGFIX] Querier: Fix stddev+stdvar aggregations to treat Infinity consistently. #9844
+* [BUGFIX] Ingester: Chunks could have one unnecessary zero byte at the end.
#9844 ### Mixin diff --git a/go.mod b/go.mod index 3a01cfe520f..6d5c5e0fd3e 100644 --- a/go.mod +++ b/go.mod @@ -76,6 +76,7 @@ require ( github.com/prometheus/procfs v0.15.1 github.com/shirou/gopsutil/v4 v4.24.10 github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97 + github.com/tjhop/slog-gokit v0.1.2 github.com/twmb/franz-go v1.18.0 github.com/twmb/franz-go/pkg/kadm v1.14.0 github.com/twmb/franz-go/pkg/kfake v0.0.0-20241111190909-81ceb1a064be @@ -84,8 +85,8 @@ require ( github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/xlab/treeprint v1.2.0 go.opentelemetry.io/collector/pdata v1.20.0 - go.opentelemetry.io/otel v1.30.0 - go.opentelemetry.io/otel/trace v1.30.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/multierr v1.11.0 golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f golang.org/x/term v0.26.0 @@ -113,7 +114,7 @@ require ( github.com/go-test/deep v1.1.0 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.1 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect @@ -136,14 +137,13 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0 // indirect - go.opentelemetry.io/otel/sdk v1.30.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/mail.v2 v2.3.1 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect - k8s.io/apimachinery v0.31.0 // indirect - k8s.io/client-go v0.31.0 // indirect + k8s.io/apimachinery v0.31.1 // indirect + k8s.io/client-go v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect ) @@ -248,7 +248,7 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.12.0 // indirect + github.com/prometheus/exporter-toolkit v0.13.0 // indirect github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect github.com/rs/cors v1.11.0 // indirect github.com/rs/xid v1.6.0 // indirect @@ -266,9 +266,9 @@ require ( go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/collector/semconv v0.110.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect @@ -284,7 +284,7 @@ require ( ) // Using a fork of Prometheus with Mimir-specific changes. 
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241120160701-db938c3ceac8 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241121154025-fd7e39a8798e // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index cbb085c8020..58a48022f50 100644 --- a/go.sum +++ b/go.sum @@ -930,8 +930,9 @@ github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 h1:FpZSn61BWXbtyH68+uSv416veEswX1M2HRyQfdHnOyQ= github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= @@ -953,14 +954,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= -github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= +github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= -github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1085,8 +1086,8 @@ 
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= +github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= @@ -1117,8 +1118,9 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1252,8 +1254,8 @@ github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPq github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= -github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= +github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -1276,8 +1278,8 @@ github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzO github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= 
-github.com/grafana/mimir-prometheus v0.0.0-20241120160701-db938c3ceac8 h1:y9kz0U/FgKalDnzS+2TbTdKytdvZrzqqX4eh3I26vZA= -github.com/grafana/mimir-prometheus v0.0.0-20241120160701-db938c3ceac8/go.mod h1:M4xmfU7SsnzjkLwJfvNen/MxAZp4DJPfipLzeib+0gQ= +github.com/grafana/mimir-prometheus v0.0.0-20241121154025-fd7e39a8798e h1:WjcrK2YdNFRsB/7kbtZVmgNmZ7pf3TY9dWgEfHHkUHI= +github.com/grafana/mimir-prometheus v0.0.0-20241121154025-fd7e39a8798e/go.mod h1:5pZyo8JoQezsp5hvVLlWhXmWLFcjUCC0fFvmswy2cBA= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3 h1:6D2gGAwyQBElSrp3E+9lSr7k8gLuP3Aiy20rweLWeBw= @@ -1447,8 +1449,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= -github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= +github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= +github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= @@ -1614,8 +1616,8 @@ github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPA github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo= -github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc= +github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= +github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -1716,6 +1718,8 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4 github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw= github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97 h1:VjG0mwhN1DkncwDHFvrpd12/2TLfgYNRmEQA48ikp+0= github.com/thanos-io/objstore v0.0.0-20241111205755-d1dd89d41f97/go.mod h1:vyzFrBXgP+fGNG2FopEGWOO/zrIuoy7zt3LpLeezRsw= +github.com/tjhop/slog-gokit v0.1.2 
h1:pmQI4SvU9h4gA0vIQsdhJQSqQg4mOmsPykG2/PM3j1I= +github.com/tjhop/slog-gokit v0.1.2/go.mod h1:8fhlcp8C8ELbg3GCyKv06tgt4B5sDq2P1r2DQAu1HuM= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -1776,24 +1780,24 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector/pdata v1.20.0 h1:ePcwt4bdtISP0loHaE+C9xYoU2ZkIvWv89Fob16o9SM= go.opentelemetry.io/collector/pdata v1.20.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs= -go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= -go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= +go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0 h1:sqmsIQ75l6lfZjjpnXXT9DFVtYEDg6CH0/Cn4/3A1Wg= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0/go.mod h1:rsg1EO8LXSs2po50PB5CeY/MSVlhghuKBgXlKnqm6ks= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod 
h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2579,12 +2583,12 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= diff --git a/integration/distributor_high_concurrency_test.go b/integration/distributor_high_concurrency_test.go index 2d66262fd7e..cc56a3fbd33 100644 --- a/integration/distributor_high_concurrency_test.go +++ b/integration/distributor_high_concurrency_test.go @@ -127,7 +127,7 @@ func testDistributorHighConcurrency(t *testing.T, cachingUnmarshalDataEnabled bo require.Equal(t, samples, added) // query all samples back - query := fmt.Sprintf("%s[%s]", serName, model.Duration(timeRange)) + query := fmt.Sprintf("%s[%s]", serName, model.Duration(timeRange+time.Millisecond)) // Add millisecond to ensure we get the first point (ranges are left-open). 
result, err := client.Query(query, writeEnd) require.NoError(t, err) require.Equal(t, exp, result) diff --git a/integration/query_frontend_cache_test.go b/integration/query_frontend_cache_test.go index b62b6188cd1..7f5ea41ddb4 100644 --- a/integration/query_frontend_cache_test.go +++ b/integration/query_frontend_cache_test.go @@ -167,10 +167,11 @@ func generateExpectedFloats(start time.Time, end time.Time, step time.Duration, val := expectedVector[0].Value const lookbackPeriod = 5 * time.Minute + rangeEnd := sampleTime.Add(lookbackPeriod) values := []model.SamplePair(nil) for ts := start; !ts.After(end); ts = ts.Add(step) { - if ts.Before(sampleTime) || ts.After(sampleTime.Add(lookbackPeriod)) { + if ts.Before(sampleTime) || ts.After(rangeEnd) || ts.Equal(rangeEnd) { continue } values = append(values, model.SamplePair{ @@ -192,10 +193,11 @@ func generateExpectedHistograms(start time.Time, end time.Time, step time.Durati hist := expectedVector[0].Histogram const lookbackPeriod = 5 * time.Minute + rangeEnd := sampleTime.Add(lookbackPeriod) histograms := []model.SampleHistogramPair(nil) for ts := start; !ts.After(end); ts = ts.Add(step) { - if ts.Before(sampleTime) || ts.After(sampleTime.Add(lookbackPeriod)) { + if ts.Before(sampleTime) || ts.After(rangeEnd) || ts.Equal(rangeEnd) { continue } histograms = append(histograms, model.SampleHistogramPair{ diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go index 707a5b625e7..4cfdc35d643 100644 --- a/integration/query_frontend_test.go +++ b/integration/query_frontend_test.go @@ -720,7 +720,7 @@ overrides: return c.QueryRangeRaw(`sum_over_time(metric[31d:1s])`, now.Add(-time.Minute), now, time.Minute) }, expStatusCode: http.StatusUnprocessableEntity, - expJSON: fmt.Sprintf(`{"error":"expanding series: %s", "errorType":"execution", "status":"error"}`, mimirquerier.NewMaxQueryLengthError((744*time.Hour)+(6*time.Minute), 720*time.Hour)), + expJSON: fmt.Sprintf(`{"error":"expanding series: %s", "errorType":"execution", "status":"error"}`, mimirquerier.NewMaxQueryLengthError((744*time.Hour)+(6*time.Minute)-time.Millisecond, 720*time.Hour)), }, { name: "query remote read time range exceeds the limit", diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 5f01cdf8620..d3ec69db3b9 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -749,7 +749,7 @@ func TestRulerMetricsForInvalidQueriesAndNoFetchedSeries(t *testing.T) { zeroSeriesQueries = getZeroSeriesQueriesTotal() const groupName4 = "good_rule_with_fetched_series_and_samples_and_non_series_selector" - const expression4 = `sum(metric{foo=~"1|2"}) + vector(1.2345)` + const expression4 = `sum(metric{foo=~"1|2"}) * vector(1.2345)` addNewRuleAndWait(groupName4, expression4, false) // Ensure that samples were returned. diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 886301c8659..798f4f0e20d 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -38,6 +38,7 @@ import ( "github.com/grafana/mimir/pkg/usagestats" "github.com/grafana/mimir/pkg/util" "github.com/grafana/mimir/pkg/util/chunkinfologger" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/validation" ) @@ -268,13 +269,15 @@ func NewQuerierHandler( nil, // Only needed for admin APIs. "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. false, // Disable admin APIs. 
- logger, + util_log.SlogFromGoKit(logger), func(context.Context) v1.RulesRetriever { return &querier.DummyRulesRetriever{} }, 0, 0, 0, // Remote read samples and concurrency limit. false, // Not an agent. regexp.MustCompile(".*"), func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, &v1.PrometheusVersion{}, + nil, + nil, // This is used for the stats API which we should not support. Or find other ways to. prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }), reg, diff --git a/pkg/blockbuilder/blockbuilder_test.go b/pkg/blockbuilder/blockbuilder_test.go index 462ba3bb238..73154161777 100644 --- a/pkg/blockbuilder/blockbuilder_test.go +++ b/pkg/blockbuilder/blockbuilder_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/dskit/services" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" @@ -164,7 +165,7 @@ func TestBlockBuilder_StartWithExistingCommit(t *testing.T) { expSamples := producedSamples[1+(len(producedSamples)/2):] bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, "1") - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) compareQuery(t, @@ -291,7 +292,7 @@ func TestBlockBuilder_ReachHighWatermarkBeforeLastCycleSection(t *testing.T) { `), "cortex_blockbuilder_consumer_lag_records")) bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, "1") - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) compareQuery(t, @@ -351,7 +352,7 @@ func TestBlockBuilder_WithMultipleTenants(t *testing.T) { for _, tenant := range tenants { bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, tenant) - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -446,7 +447,7 @@ func TestBlockBuilder_WithNonMonotonicRecordTimestamps(t *testing.T) { require.NoError(t, bb.nextConsumeCycle(ctx, end)) bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, tenantID) - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -517,7 +518,7 @@ func TestBlockBuilder_RetryOnTransientErrors(t *testing.T) { require.Eventually(t, func() bool { return kafkaCommits.Load() >= 1 }, 50*time.Second, 100*time.Millisecond, "expected kafka commits") bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, "1") - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) compareQuery(t, diff --git a/pkg/blockbuilder/tsdb.go b/pkg/blockbuilder/tsdb.go index ab865f39a5e..feb5f8dbdd5 100644 --- a/pkg/blockbuilder/tsdb.go +++ b/pkg/blockbuilder/tsdb.go @@ 
-256,7 +256,7 @@ func (b *TSDBBuilder) newTSDB(tenant tsdbTenant) (*userTSDB, error) { userID := tenant.tenantID userLogger := util_log.WithUserID(userID, b.logger) - db, err := tsdb.Open(udir, userLogger, nil, &tsdb.Options{ + db, err := tsdb.Open(udir, util_log.SlogFromGoKit(userLogger), nil, &tsdb.Options{ RetentionDuration: 0, MinBlockDuration: 2 * time.Hour.Milliseconds(), MaxBlockDuration: 2 * time.Hour.Milliseconds(), diff --git a/pkg/blockbuilder/tsdb_test.go b/pkg/blockbuilder/tsdb_test.go index 05c0e3f6e77..6d6f9eb97a0 100644 --- a/pkg/blockbuilder/tsdb_test.go +++ b/pkg/blockbuilder/tsdb_test.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/dskit/flagext" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -265,7 +266,7 @@ func TestTSDBBuilder(t *testing.T) { require.NoError(t, err) require.Nil(t, builder.tsdbs[tenant]) - newDB, err := tsdb.Open(shipperDir, log.NewNopLogger(), nil, nil, nil) + newDB, err := tsdb.Open(shipperDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) // One for the in-order current range. Two for the out-of-order blocks: one for the current range diff --git a/pkg/compactor/bucket_compactor_e2e_test.go b/pkg/compactor/bucket_compactor_e2e_test.go index 88c129ed706..ded4174523d 100644 --- a/pkg/compactor/bucket_compactor_e2e_test.go +++ b/pkg/compactor/bucket_compactor_e2e_test.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" @@ -37,6 +38,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/grafana/mimir/pkg/storage/tsdb/block" + util_log "github.com/grafana/mimir/pkg/util/log" ) func TestSyncer_GarbageCollect_e2e(t *testing.T) { @@ -232,7 +234,7 @@ func TestGroupCompactE2E(t *testing.T) { sy, err := newMetaSyncer(nil, nil, bkt, metaFetcher, duplicateBlocksFilter, blocksMarkedForDeletion) require.NoError(t, err) - comp, err := tsdb.NewLeveledCompactor(ctx, reg, logger, []int64{1000, 3000}, nil, nil) + comp, err := tsdb.NewLeveledCompactor(ctx, reg, util_log.SlogFromGoKit(logger), []int64{1000, 3000}, nil, nil) require.NoError(t, err) planner := NewSplitAndMergePlanner([]int64{1000, 3000}) @@ -690,7 +692,7 @@ func createBlockWithOptions( if err := g.Wait(); err != nil { return id, err } - c, err := tsdb.NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{maxt - mint}, nil, nil) + c, err := tsdb.NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{maxt - mint}, nil, nil) if err != nil { return id, errors.Wrap(err, "create compactor") } diff --git a/pkg/compactor/split_merge_compactor.go b/pkg/compactor/split_merge_compactor.go index 154c8454dc4..bfc63ad1431 100644 --- a/pkg/compactor/split_merge_compactor.go +++ b/pkg/compactor/split_merge_compactor.go @@ -8,6 +8,8 @@ import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb" + + util_log "github.com/grafana/mimir/pkg/util/log" ) func splitAndMergeGrouperFactory(_ context.Context, cfg Config, cfgProvider ConfigProvider, userID string, logger log.Logger, _ 
prometheus.Registerer) Grouper { @@ -21,7 +23,7 @@ func splitAndMergeGrouperFactory(_ context.Context, cfg Config, cfgProvider Conf func splitAndMergeCompactorFactory(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (Compactor, Planner, error) { // We don't need to customise the TSDB compactor so we're just using the Prometheus one. - compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), nil, nil) + compactor, err := tsdb.NewLeveledCompactor(ctx, reg, util_log.SlogFromGoKit(logger), cfg.BlockRanges.ToMilliseconds(), nil, nil) if err != nil { return nil, nil, err } diff --git a/pkg/compactor/split_merge_compactor_test.go b/pkg/compactor/split_merge_compactor_test.go index 3614a9b5c0f..8f008dc020d 100644 --- a/pkg/compactor/split_merge_compactor_test.go +++ b/pkg/compactor/split_merge_compactor_test.go @@ -28,6 +28,7 @@ import ( "github.com/grafana/mimir/pkg/storage/sharding" mimir_tsdb "github.com/grafana/mimir/pkg/storage/tsdb" "github.com/grafana/mimir/pkg/storage/tsdb/block" + util_log "github.com/grafana/mimir/pkg/util/log" util_test "github.com/grafana/mimir/pkg/util/test" ) @@ -774,7 +775,7 @@ func TestMultitenantCompactor_ShouldGuaranteeSeriesShardingConsistencyOverTheTim for _, actualMeta := range actualMetas { expectedSeriesIDs := expectedSeriesIDByShard[actualMeta.Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel]] - b, err := tsdb.OpenBlock(logger, filepath.Join(storageDir, userID, actualMeta.ULID.String()), nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), filepath.Join(storageDir, userID, actualMeta.ULID.String()), nil) require.NoError(t, err) indexReader, err := b.Index() diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 0c8bca9b8e3..3723d46b669 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -61,6 +61,11 @@ import ( "github.com/grafana/mimir/pkg/util/validation" ) +func init() { + // Mimir doesn't support Prometheus' UTF-8 metric/label name scheme yet. + model.NameValidationScheme = model.LegacyValidation +} + var ( // Validation errors. 
errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than or equal to zero") diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index ffdb0533b0e..d6f17f22a7f 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1274,6 +1274,11 @@ func TestDistributor_Push_LabelNameValidation(t *testing.T) { skipLabelNameValidationReq: true, errExpected: false, }, + "UTF-8 characters are not accepted": { + inputLabels: labelAdapters(model.MetricNameLabel, "foo", "😊", "smile!"), + errExpected: true, + errMessage: `received a series with an invalid label: '😊' series: 'foo{😊="smile!"}' (err-mimir-label-invalid)`, + }, } for testName, tc := range tests { diff --git a/pkg/distributor/otel.go b/pkg/distributor/otel.go index 861b2cb9323..b4754fb85c9 100644 --- a/pkg/distributor/otel.go +++ b/pkg/distributor/otel.go @@ -413,7 +413,7 @@ func otelMetricsToTimeseries(ctx context.Context, tenantID string, addSuffixes, AddMetricSuffixes: addSuffixes, EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, PromoteResourceAttributes: promoteResourceAttributes, - }, logger) + }, utillog.SlogFromGoKit(logger)) mimirTS := converter.TimeSeries() if errs != nil { dropped := len(multierr.Errors(errs)) diff --git a/pkg/distributor/otlp/helper_generated.go b/pkg/distributor/otlp/helper_generated.go index 7889da3a355..5265b862601 100644 --- a/pkg/distributor/otlp/helper_generated.go +++ b/pkg/distributor/otlp/helper_generated.go @@ -23,6 +23,7 @@ import ( "encoding/hex" "fmt" "log" + "log/slog" "math" "slices" "sort" @@ -30,8 +31,6 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - gokitlog "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -247,7 +246,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. 
func (c *MimirConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, logger gokitlog.Logger) error { + resource pcommon.Resource, settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -339,7 +338,7 @@ func (c *MimirConverter) addHistogramDataPoints(ctx context.Context, dataPoints labels := createLabels(baseName+createdSuffix, baseLabels) c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") + logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") } return nil @@ -361,9 +360,17 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, exemplarRunes := 0 promExemplar := mimirpb.Exemplar{ - Value: exemplar.DoubleValue(), TimestampMs: timestamp.FromTime(exemplar.Timestamp().AsTime()), } + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar.Value = float64(exemplar.IntValue()) + case pmetric.ExemplarValueTypeDouble: + promExemplar.Value = exemplar.DoubleValue() + default: + return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) @@ -445,7 +452,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *MimirConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, logger gokitlog.Logger) error { + settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -502,7 +509,7 @@ func (c *MimirConverter) addSummaryDataPoints(ctx context.Context, dataPoints pm c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") + logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") } return nil @@ -586,7 +593,7 @@ const defaultIntervalForStartTimestamps = int64(300_000) // make use of its direct support fort Created Timestamps instead. // See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles // resets for cumulative metrics. 
-func (c *MimirConverter) handleStartTime(startTs, ts int64, labels []mimirpb.LabelAdapter, settings Settings, typ string, value float64, logger gokitlog.Logger) { +func (c *MimirConverter) handleStartTime(startTs, ts int64, labels []mimirpb.LabelAdapter, settings Settings, typ string, value float64, logger *slog.Logger) { if !settings.EnableCreatedTimestampZeroIngestion { return } @@ -608,7 +615,7 @@ func (c *MimirConverter) handleStartTime(startTs, ts int64, labels []mimirpb.Lab return } - level.Debug(logger).Log("msg", "adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) + logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) // See https://github.com/prometheus/prometheus/issues/14600 for context. c.addSample(&mimirpb.Sample{TimestampMs: startTs}, labels) @@ -684,10 +691,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta return } - ts := convertTimeStamp(timestamp) sample := &mimirpb.Sample{ - Value: float64(1), - TimestampMs: ts, + Value: float64(1), + // convert ns to ms + TimestampMs: convertTimeStamp(timestamp), } converter.addSample(sample, labels) } diff --git a/pkg/distributor/otlp/histograms_generated.go b/pkg/distributor/otlp/histograms_generated.go index c1bc72c7088..09a97758fb9 100644 --- a/pkg/distributor/otlp/histograms_generated.go +++ b/pkg/distributor/otlp/histograms_generated.go @@ -63,7 +63,6 @@ func (c *MimirConverter) addExponentialHistogramDataPoints(ctx context.Context, promName, ) ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) diff --git a/pkg/distributor/otlp/metrics_to_prw_generated.go b/pkg/distributor/otlp/metrics_to_prw_generated.go index 89f9facc2dc..ea8526657e6 100644 --- a/pkg/distributor/otlp/metrics_to_prw_generated.go +++ b/pkg/distributor/otlp/metrics_to_prw_generated.go @@ -22,17 +22,18 @@ import ( "context" "errors" "fmt" + "log/slog" "sort" "strings" "time" - "github.com/go-kit/log" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" - "github.com/prometheus/prometheus/util/annotations" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" + "github.com/prometheus/prometheus/util/annotations" + "github.com/grafana/mimir/pkg/mimirpb" ) @@ -69,7 +70,7 @@ func NewMimirConverter() *MimirConverter { } // FromMetrics converts pmetric.Metrics to Mimir remote write format. 
-func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger log.Logger) (annots annotations.Annotations, errs error) { +func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { diff --git a/pkg/distributor/otlp/number_data_points_generated.go b/pkg/distributor/otlp/number_data_points_generated.go index 4b07e749213..f6e32c3cb8b 100644 --- a/pkg/distributor/otlp/number_data_points_generated.go +++ b/pkg/distributor/otlp/number_data_points_generated.go @@ -20,10 +20,9 @@ package otlp import ( "context" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -50,9 +49,9 @@ func (c *MimirConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoint model.MetricNameLabel, name, ) - timestamp := convertTimeStamp(pt.Timestamp()) sample := &mimirpb.Sample{ - TimestampMs: timestamp, + // convert ns to ms + TimestampMs: convertTimeStamp(pt.Timestamp()), } switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: @@ -70,7 +69,7 @@ func (c *MimirConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoint } func (c *MimirConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger log.Logger) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -88,8 +87,8 @@ func (c *MimirConverter) addSumNumberDataPoints(ctx context.Context, dataPoints model.MetricNameLabel, name, ) - sample := &mimirpb.Sample{ + // convert ns to ms TimestampMs: timestamp, } switch pt.ValueType() { @@ -130,7 +129,7 @@ func (c *MimirConverter) addSumNumberDataPoints(ctx context.Context, dataPoints } c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") + logger.Debug("addSumNumberDataPoints", "labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") } return nil diff --git a/pkg/frontend/querymiddleware/astmapper/astmapper.go b/pkg/frontend/querymiddleware/astmapper/astmapper.go index a79aa986ef6..95136f0317a 100644 --- a/pkg/frontend/querymiddleware/astmapper/astmapper.go +++ b/pkg/frontend/querymiddleware/astmapper/astmapper.go @@ -8,8 +8,14 @@ package astmapper import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/promql/parser" + + "github.com/grafana/mimir/pkg/util/promqlext" ) +func init() { + promqlext.ExtendPromQL() +} + // ASTMapper is the exported interface for mapping between multiple AST representations type ASTMapper interface { // Map the input expr and returns the mapped expr. 
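The `astmapper` change above wires in `promqlext.ExtendPromQL()`, and the test comment in the next hunk notes that `holt_winters` stays available as a backwards-compatible alias for the renamed `double_exponential_smoothing` function. As a rough illustration of how such an alias could be registered against the upstream Prometheus parser, here is a hypothetical sketch; it is not the actual contents of Mimir's `promqlext` package, and only `parser.Functions` (the upstream PromQL function table) is a real API:

```go
// Hypothetical sketch: keep `holt_winters` parsing as an alias of the renamed
// `double_exponential_smoothing`. The real ExtendPromQL may differ.
package promqlext

import "github.com/prometheus/prometheus/promql/parser"

func ExtendPromQL() {
	// parser.Functions is the function table consulted by the PromQL parser.
	if fn, ok := parser.Functions["double_exponential_smoothing"]; ok {
		alias := *fn                // copy the function definition
		alias.Name = "holt_winters" // expose it under the old name
		parser.Functions["holt_winters"] = &alias
	}
}
```

Registering the alias once from an `init` hook, as the `astmapper` change does, keeps every component that parses PromQL (query-frontend mappers, querier, ruler) behaving consistently without patching the parser itself.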
diff --git a/pkg/frontend/querymiddleware/astmapper/instant_splitting_test.go b/pkg/frontend/querymiddleware/astmapper/instant_splitting_test.go index 55db5ea48eb..1b5afcd7e4d 100644 --- a/pkg/frontend/querymiddleware/astmapper/instant_splitting_test.go +++ b/pkg/frontend/querymiddleware/astmapper/instant_splitting_test.go @@ -466,6 +466,7 @@ func TestInstantSplitterSkippedQueryReason(t *testing.T) { query: `deriv({app="foo"}[3m])`, skippedReason: SkippedReasonNonSplittable, }, + // holt_winters is a backwards compatible, non-experimental, alias for double_exponential_smoothing. { query: `holt_winters({app="foo"}[3m], 1, 10)`, skippedReason: SkippedReasonNonSplittable, diff --git a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go index 89c5f2d6401..b63c0cbb3e6 100644 --- a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go +++ b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go @@ -14,9 +14,10 @@ import ( "github.com/grafana/dskit/cache" "github.com/grafana/dskit/tenant" "github.com/grafana/dskit/user" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + mimirtest "github.com/grafana/mimir/pkg/util/test" ) func TestCardinalityQueryCache_RoundTrip_WithTenantFederation(t *testing.T) { @@ -66,7 +67,7 @@ func TestCardinalityQueryCache_RoundTrip_WithTenantFederation(t *testing.T) { cacheBackend := cache.NewInstrumentedMockCache() limits := multiTenantMockLimits{byTenant: testData.limits} - rt := newCardinalityQueryCacheRoundTripper(cacheBackend, DefaultCacheKeyGenerator{}, limits, downstream, testutil.NewLogger(t), nil) + rt := newCardinalityQueryCacheRoundTripper(cacheBackend, DefaultCacheKeyGenerator{}, limits, downstream, mimirtest.NewTestingLogger(t), nil) res, err := rt.RoundTrip(req) require.NoError(t, err) diff --git a/pkg/frontend/querymiddleware/codec_test.go b/pkg/frontend/querymiddleware/codec_test.go index 39c9a9b4698..22e0e9d6804 100644 --- a/pkg/frontend/querymiddleware/codec_test.go +++ b/pkg/frontend/querymiddleware/codec_test.go @@ -205,74 +205,74 @@ func TestMetricsQuery_MinMaxTime(t *testing.T) { { name: "range query: without range vector, without offset", metricsQuery: rangeRequest, - expectedMinT: startTime.UnixMilli() - lookbackDurationMS, + expectedMinT: startTime.UnixMilli() - lookbackDurationMS + 1, expectedMaxT: endTime.UnixMilli(), }, { name: "instant query: without range vector, without offset", metricsQuery: instantRequest, - expectedMinT: endTime.UnixMilli() - lookbackDurationMS, + expectedMinT: endTime.UnixMilli() - lookbackDurationMS + 1, expectedMaxT: endTime.UnixMilli(), }, { name: "range query: with range vector, without offset", metricsQuery: withQuery(t, rangeRequest, fmt.Sprintf("rate(go_goroutines{}[%s])", rangeVectorDurationStr)), - expectedMinT: startTime.UnixMilli() - rangeVectorDurationMS, // lookback duration not used with range vectors + expectedMinT: startTime.UnixMilli() - rangeVectorDurationMS + 1, // lookback duration not used with range vectors expectedMaxT: endTime.UnixMilli(), }, { name: "instant query: with range vector, without offset", metricsQuery: withQuery(t, instantRequest, fmt.Sprintf("rate(go_goroutines{}[%s])", rangeVectorDurationStr)), - expectedMinT: endTime.UnixMilli() - rangeVectorDurationMS, // lookback duration not used with range vectors + expectedMinT: endTime.UnixMilli() - rangeVectorDurationMS + 1, // lookback duration not used with range vectors 
expectedMaxT: endTime.UnixMilli(), }, { name: "range query: without range vector, with offset", metricsQuery: withQuery(t, rangeRequest, fmt.Sprintf("go_goroutines{} offset %s", offsetDurationStr)), - expectedMinT: startTime.UnixMilli() - offsetDurationMS - lookbackDurationMS, + expectedMinT: startTime.UnixMilli() - offsetDurationMS - lookbackDurationMS + 1, expectedMaxT: endTime.UnixMilli() - offsetDurationMS, }, { name: "instant query: without range vector, with offset", metricsQuery: withQuery(t, instantRequest, fmt.Sprintf("go_goroutines{} offset %s", offsetDurationStr)), - expectedMinT: endTime.UnixMilli() - offsetDurationMS - lookbackDurationMS, + expectedMinT: endTime.UnixMilli() - offsetDurationMS - lookbackDurationMS + 1, expectedMaxT: endTime.UnixMilli() - offsetDurationMS, }, { name: "range query: with range vector, with offset", metricsQuery: withQuery(t, rangeRequest, fmt.Sprintf("rate(go_goroutines{}[%s] offset %s)", rangeVectorDurationStr, offsetDurationStr)), - expectedMinT: startTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS, // lookback duration not used with range vectors + expectedMinT: startTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS + 1, // lookback duration not used with range vectors expectedMaxT: endTime.UnixMilli() - offsetDurationMS, }, { name: "instant query: with range vector, with offset", metricsQuery: withQuery(t, instantRequest, fmt.Sprintf("rate(go_goroutines{}[%s] offset %s)", rangeVectorDurationStr, offsetDurationStr)), - expectedMinT: endTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS, // lookback duration not used with range vectors + expectedMinT: endTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS + 1, // lookback duration not used with range vectors expectedMaxT: endTime.UnixMilli() - offsetDurationMS, }, // permutations with and without range vectors and @ modifiers { name: "range query: with @ modifer", metricsQuery: withQuery(t, rangeRequest, fmt.Sprintf("go_goroutines{} @ %d", endTime.Add(-atModifierDuration).Unix())), - expectedMinT: endTime.Add(-atModifierDuration).UnixMilli() - lookbackDurationMS, + expectedMinT: endTime.Add(-atModifierDuration).UnixMilli() - lookbackDurationMS + 1, expectedMaxT: endTime.Add(-atModifierDuration).UnixMilli(), }, { name: "instant query: with @ modifer", metricsQuery: withQuery(t, instantRequest, fmt.Sprintf("go_goroutines{} @ %d", endTime.Add(-atModifierDuration).Unix())), - expectedMinT: endTime.Add(-atModifierDuration).UnixMilli() - lookbackDurationMS, // lookback duration not used with range vectors + expectedMinT: endTime.Add(-atModifierDuration).UnixMilli() - lookbackDurationMS + 1, expectedMaxT: endTime.Add(-atModifierDuration).UnixMilli(), }, { name: "range query: with range vector, with @ modifer", metricsQuery: withQuery(t, rangeRequest, fmt.Sprintf("go_goroutines{}[%s] @ %d", rangeVectorDurationStr, endTime.Add(-atModifierDuration).Unix())), - expectedMinT: endTime.Add(-(atModifierDuration + rangeVectorDuration)).UnixMilli(), // lookback duration not used with range vectors + expectedMinT: endTime.Add(-(atModifierDuration + rangeVectorDuration)).UnixMilli() + 1, // lookback duration not used with range vectors expectedMaxT: endTime.Add(-atModifierDuration).UnixMilli(), }, { name: "instant query: with range vector, with @ modifer", metricsQuery: withQuery(t, instantRequest, fmt.Sprintf("go_goroutines{}[%s] @ %d", rangeVectorDurationStr, endTime.Add(-atModifierDuration).Unix())), - expectedMinT: endTime.Add(-(atModifierDuration + 
rangeVectorDuration)).UnixMilli(), // lookback duration not used with range vectors + expectedMinT: endTime.Add(-(atModifierDuration + rangeVectorDuration)).UnixMilli() + 1, // lookback duration not used with range vectors expectedMaxT: endTime.Add(-atModifierDuration).UnixMilli(), }, } { @@ -338,7 +338,7 @@ func TestMetricsQuery_WithStartEnd_TransformConsistency(t *testing.T) { updatedStartTime: updatedStartTime, updatedEndTime: updatedEndTime, - expectedUpdatedMinT: updatedStartTime.UnixMilli(), + expectedUpdatedMinT: updatedStartTime.UnixMilli() + 1, // query range is left-open, but minT is inclusive expectedUpdatedMaxT: updatedEndTime.UnixMilli(), }, { @@ -347,7 +347,7 @@ func TestMetricsQuery_WithStartEnd_TransformConsistency(t *testing.T) { updatedStartTime: updatedEndTime, updatedEndTime: updatedEndTime, - expectedUpdatedMinT: updatedEndTime.UnixMilli(), + expectedUpdatedMinT: updatedEndTime.UnixMilli() + 1, // query range is left-open, but minT is inclusive expectedUpdatedMaxT: updatedEndTime.UnixMilli(), }, } { @@ -418,7 +418,7 @@ func TestMetricsQuery_WithQuery_WithExpr_TransformConsistency(t *testing.T) { initialMetricsQuery: rangeRequest, updatedQuery: fmt.Sprintf("rate(go_goroutines{}[%s] offset %s)", rangeVectorDurationStr, offsetDurationStr), - expectedUpdatedMinT: startTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS, + expectedUpdatedMinT: startTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS + 1, expectedUpdatedMaxT: endTime.UnixMilli() - offsetDurationMS, expectedErr: nil, }, @@ -427,7 +427,7 @@ func TestMetricsQuery_WithQuery_WithExpr_TransformConsistency(t *testing.T) { initialMetricsQuery: instantRequest, updatedQuery: fmt.Sprintf("rate(go_goroutines{}[%s] offset %s)", rangeVectorDurationStr, offsetDurationStr), - expectedUpdatedMinT: endTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS, + expectedUpdatedMinT: endTime.UnixMilli() - rangeVectorDurationMS - offsetDurationMS + 1, expectedUpdatedMaxT: endTime.UnixMilli() - offsetDurationMS, expectedErr: nil, }, diff --git a/pkg/frontend/querymiddleware/generic_query_cache_test.go b/pkg/frontend/querymiddleware/generic_query_cache_test.go index eae385bad9f..5b4811afca0 100644 --- a/pkg/frontend/querymiddleware/generic_query_cache_test.go +++ b/pkg/frontend/querymiddleware/generic_query_cache_test.go @@ -18,9 +18,10 @@ import ( "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + mimirtest "github.com/grafana/mimir/pkg/util/test" ) type newGenericQueryCacheFunc func(cache cache.Cache, splitter CacheKeyGenerator, limits Limits, next http.RoundTripper, logger log.Logger, reg prometheus.Registerer) http.RoundTripper @@ -226,7 +227,7 @@ func testGenericQueryCacheRoundTrip(t *testing.T, newRoundTripper newGenericQuer initialStoreCallsCount := cacheBackend.CountStoreCalls() reg := prometheus.NewPedanticRegistry() - rt := newRoundTripper(cacheBackend, DefaultCacheKeyGenerator{codec: NewPrometheusCodec(reg, 0*time.Minute, formatJSON, nil)}, limits, downstream, testutil.NewLogger(t), reg) + rt := newRoundTripper(cacheBackend, DefaultCacheKeyGenerator{codec: NewPrometheusCodec(reg, 0*time.Minute, formatJSON, nil)}, limits, downstream, mimirtest.NewTestingLogger(t), reg) res, err := rt.RoundTrip(req) require.NoError(t, err) diff --git 
a/pkg/frontend/querymiddleware/model_extra_test.go b/pkg/frontend/querymiddleware/model_extra_test.go index e45bb7df664..70be7d47822 100644 --- a/pkg/frontend/querymiddleware/model_extra_test.go +++ b/pkg/frontend/querymiddleware/model_extra_test.go @@ -185,13 +185,13 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { "instant vector selector, not modified": { query: "some_metric{}", withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req, nil }, - expectedMinT: start.Add(-1 * defaultLookback).UnixMilli(), // Default lookback. + expectedMinT: start.Add(-1*defaultLookback).UnixMilli() + 1, // Default lookback. expectedMaxT: end.UnixMilli(), }, "range vector selector, not modified": { query: "some_metric{}[10m]", withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req, nil }, - expectedMinT: start.Add(-10 * time.Minute).UnixMilli(), // Lookback is overridden by the range. + expectedMinT: start.Add(-10*time.Minute).UnixMilli() + 1, // Lookback is overridden by the range. expectedMaxT: end.UnixMilli(), }, "instant vector query, WithStartEnd": { @@ -199,7 +199,7 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithStartEnd(start.Add(-1*time.Hour).UnixMilli(), end.Add(5*time.Minute).UnixMilli()) }, - expectedMinT: start.Add(-1 * time.Hour).Add(-1 * defaultLookback).UnixMilli(), + expectedMinT: start.Add(-1*time.Hour).Add(-1*defaultLookback).UnixMilli() + 1, expectedMaxT: end.Add(5 * time.Minute).UnixMilli(), }, "range vector query, WithStartEnd": { @@ -207,7 +207,7 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithStartEnd(start.Add(-1*time.Hour).UnixMilli(), end.Add(5*time.Minute).UnixMilli()) }, - expectedMinT: start.Add(-1 * time.Hour).Add(-10 * time.Minute).UnixMilli(), + expectedMinT: start.Add(-1*time.Hour).Add(-10*time.Minute).UnixMilli() + 1, expectedMaxT: end.Add(5 * time.Minute).UnixMilli(), }, "instant vector query, WithQuery": { @@ -215,7 +215,7 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithQuery("other_metric{}") }, - expectedMinT: start.Add(-1 * defaultLookback).UnixMilli(), // Default lookback. + expectedMinT: start.Add(-1*defaultLookback).UnixMilli() + 1, // Default lookback. expectedMaxT: end.UnixMilli(), }, "range vector query, WithQuery": { @@ -223,7 +223,7 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithQuery("some_metric{}[20m]") }, - expectedMinT: start.Add(-20 * time.Minute).UnixMilli(), // Lookback is overridden by the range. + expectedMinT: start.Add(-20*time.Minute).UnixMilli() + 1, // Lookback is overridden by the range. expectedMaxT: end.UnixMilli(), }, "instant vector query, WithExpr": { @@ -235,7 +235,7 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { } return req.WithExpr(newExpr) }, - expectedMinT: start.Add(-1 * defaultLookback).UnixMilli(), // Default lookback. + expectedMinT: start.Add(-1*defaultLookback).UnixMilli() + 1, // Default lookback. 
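+			// The query range is left-open, so a sample exactly at start-lookback is no longer selected; minT (which stays inclusive) therefore moves forward by one millisecond.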
expectedMaxT: end.UnixMilli(), }, "range vector query, WithExpr": { @@ -247,7 +247,7 @@ func TestPrometheusRangeQueryRequest_MinTMaxT(t *testing.T) { } return req.WithExpr(newExpr) }, - expectedMinT: start.Add(-20 * time.Minute).UnixMilli(), // Lookback is overridden by the range. + expectedMinT: start.Add(-20*time.Minute).UnixMilli() + 1, // Lookback is overridden by the range. expectedMaxT: end.UnixMilli(), }, } @@ -289,13 +289,13 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { "instant vector selector, not modified": { query: "some_metric{}", withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req, nil }, - expectedMinT: now.Add(-1 * defaultLookback).UnixMilli(), // Default lookback. + expectedMinT: now.Add(-1*defaultLookback).UnixMilli() + 1, // Default lookback. expectedMaxT: now.UnixMilli(), }, "range vector selector, not modified": { query: "some_metric{}[10m]", withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req, nil }, - expectedMinT: now.Add(-10 * time.Minute).UnixMilli(), // Lookback is overridden by the range. + expectedMinT: now.Add(-10*time.Minute).UnixMilli() + 1, // Lookback is overridden by the range. expectedMaxT: now.UnixMilli(), }, "instant vector selector, WithStartEnd": { @@ -303,7 +303,7 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithStartEnd(now.Add(-1*time.Hour).UnixMilli(), 42) // 42 is ignored. }, - expectedMinT: now.Add(-1 * time.Hour).Add(-1 * defaultLookback).UnixMilli(), + expectedMinT: now.Add(-1*time.Hour).Add(-1*defaultLookback).UnixMilli() + 1, expectedMaxT: now.Add(-1 * time.Hour).UnixMilli(), }, "range vector selector, WithStartEnd": { @@ -311,7 +311,7 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithStartEnd(now.Add(-1*time.Hour).UnixMilli(), 42) // 42 is ignored. }, - expectedMinT: now.Add(-1 * time.Hour).Add(-10 * time.Minute).UnixMilli(), + expectedMinT: now.Add(-1*time.Hour).Add(-10*time.Minute).UnixMilli() + 1, expectedMaxT: now.Add(-1 * time.Hour).UnixMilli(), }, "instant vector selector, WithQuery": { @@ -319,7 +319,7 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithQuery("other_metric{}") }, - expectedMinT: now.Add(-1 * defaultLookback).UnixMilli(), // Default lookback. + expectedMinT: now.Add(-1*defaultLookback).UnixMilli() + 1, // Default lookback. expectedMaxT: now.UnixMilli(), }, "range vector selector, WithQuery": { @@ -327,7 +327,7 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { withFn: func(req MetricsQueryRequest) (MetricsQueryRequest, error) { return req.WithQuery("some_metric{}[20m]") }, - expectedMinT: now.Add(-20 * time.Minute).UnixMilli(), // Lookback is overridden by the range. + expectedMinT: now.Add(-20*time.Minute).UnixMilli() + 1, // Lookback is overridden by the range. expectedMaxT: now.UnixMilli(), }, "instant vector selector, WithExpr": { @@ -339,7 +339,7 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { } return req.WithExpr(newExpr) }, - expectedMinT: now.Add(-1 * defaultLookback).UnixMilli(), // Default lookback. + expectedMinT: now.Add(-1*defaultLookback).UnixMilli() + 1, // Default lookback. 
expectedMaxT: now.UnixMilli(), }, "range vector selector, WithExpr": { @@ -351,7 +351,7 @@ func TestPrometheusInstantQueryRequest_MinTMaxT(t *testing.T) { } return req.WithExpr(newExpr) }, - expectedMinT: now.Add(-20 * time.Minute).UnixMilli(), // Lookback is overridden by the range. + expectedMinT: now.Add(-20*time.Minute).UnixMilli() + 1, // Lookback is overridden by the range. expectedMaxT: now.UnixMilli(), }, } diff --git a/pkg/frontend/querymiddleware/prune_test.go b/pkg/frontend/querymiddleware/prune_test.go index d0ea3cdda5a..71cd3b34a0f 100644 --- a/pkg/frontend/querymiddleware/prune_test.go +++ b/pkg/frontend/querymiddleware/prune_test.go @@ -46,17 +46,17 @@ func TestQueryPruning(t *testing.T) { } templates := []template{ - {`avg(rate(%s[1m])) < (-1 * +Inf)`, true}, - {`avg(rate(%s[1m])) < (+1 * +Inf)`, false}, - {`avg(rate(%s[1m])) < (-1 * -Inf)`, false}, - {`avg(rate(%s[1m])) < (+1 * -Inf)`, true}, - {`(-1 * -Inf) < avg(rate(%s[1m]))`, true}, - {`((-1 * -Inf) < avg(rate(foo[2m]))) or avg(rate(%s[1m]))`, false}, - {`((-1 * -Inf) < avg(rate(foo[2m]))) and avg(rate(%s[1m]))`, true}, - {`((-1 * -Inf) < avg(rate(foo[2m]))) unless avg(rate(%s[1m]))`, true}, - {`avg(rate(%s[1m])) unless ((-1 * -Inf) < avg(rate(foo[2m])))`, false}, - {`(((-1 * -Inf) < avg(rate(foo[3m]))) unless avg(rate(foo[2m]))) or avg(rate(%s[1m]))`, true}, - {`((((-1 * -Inf) < avg(rate(foo[4m]))) unless avg(rate(foo[3m]))) and avg(rate(foo[2m]))) or avg(rate(%s[1m]))`, true}, + {`avg(rate(%s[1m1s])) < (-1 * +Inf)`, true}, + {`avg(rate(%s[1m1s])) < (+1 * +Inf)`, false}, + {`avg(rate(%s[1m1s])) < (-1 * -Inf)`, false}, + {`avg(rate(%s[1m1s])) < (+1 * -Inf)`, true}, + {`(-1 * -Inf) < avg(rate(%s[1m1s]))`, true}, + {`((-1 * -Inf) < avg(rate(foo[2m1s]))) or avg(rate(%s[1m1s]))`, false}, + {`((-1 * -Inf) < avg(rate(foo[2m1s]))) and avg(rate(%s[1m1s]))`, true}, + {`((-1 * -Inf) < avg(rate(foo[2m1s]))) unless avg(rate(%s[1m1s]))`, true}, + {`avg(rate(%s[1m1s])) unless ((-1 * -Inf) < avg(rate(foo[2m1s])))`, false}, + {`(((-1 * -Inf) < avg(rate(foo[3m1s]))) unless avg(rate(foo[2m1s]))) or avg(rate(%s[1m1s]))`, true}, + {`((((-1 * -Inf) < avg(rate(foo[4m1s]))) unless avg(rate(foo[3m1s]))) and avg(rate(foo[2m1s]))) or avg(rate(%s[1m1s]))`, true}, } for _, template := range templates { t.Run(template.query, func(t *testing.T) { diff --git a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go index 3ab7549f848..24aefe429f4 100644 --- a/pkg/frontend/querymiddleware/querysharding_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" @@ -1009,7 +1010,6 @@ func TestQuerySharding_FunctionCorrectness(t *testing.T) { {fn: "clamp", args: []string{"5", "10"}}, {fn: "clamp_max", args: []string{"5"}}, {fn: "clamp_min", args: []string{"5"}}, - {fn: "round", args: []string{"20"}}, {fn: "label_replace", args: []string{`"fuzz"`, `"$1"`, `"foo"`, `"b(.*)"`}}, {fn: "label_join", args: []string{`"fuzz"`, `","`, `"foo"`, `"bar"`}}, } @@ -1028,6 +1028,7 @@ func TestQuerySharding_FunctionCorrectness(t *testing.T) { {fn: "log2"}, {fn: "max_over_time", rangeQuery: true}, {fn: "min_over_time", rangeQuery: true}, + {fn: "round", args: []string{"20"}}, 
{fn: "sqrt"}, {fn: "deg"}, {fn: "asinh"}, @@ -1051,6 +1052,8 @@ func TestQuerySharding_FunctionCorrectness(t *testing.T) { {fn: "mad_over_time", rangeQuery: true, tpl: `((bar1{}))`}, {fn: "sgn"}, {fn: "predict_linear", args: []string{"1"}, rangeQuery: true}, + {fn: "double_exponential_smoothing", args: []string{"0.5", "0.7"}, rangeQuery: true}, + // holt_winters is a backwards compatible alias for double_exponential_smoothing. {fn: "holt_winters", args: []string{"0.5", "0.7"}, rangeQuery: true}, } testsForNativeHistogramsOnly := []queryShardingFunctionCorrectnessTest{ @@ -1514,7 +1517,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { var ( engine = newEngine() engineTimeout = promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 10e6, Timeout: 50 * time.Millisecond, @@ -1527,7 +1530,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { }, }) engineSampleLimit = promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1, Timeout: time.Hour, @@ -1941,7 +1944,7 @@ func BenchmarkQuerySharding(b *testing.B) { time.Millisecond / 10, } { engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 100000000, Timeout: time.Minute, @@ -2509,7 +2512,7 @@ func (i *seriesIteratorMock) Warnings() annotations.Annotations { // newEngine creates and return a new promql.Engine used for testing. func newEngine() *promql.Engine { return promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 10e6, Timeout: 1 * time.Hour, diff --git a/pkg/frontend/querymiddleware/roundtrip_test.go b/pkg/frontend/querymiddleware/roundtrip_test.go index 65f5216ffcb..aeb78323b9f 100644 --- a/pkg/frontend/querymiddleware/roundtrip_test.go +++ b/pkg/frontend/querymiddleware/roundtrip_test.go @@ -30,6 +30,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/promql" "github.com/stretchr/testify/assert" @@ -82,7 +83,7 @@ func TestTripperware_RangeQuery(t *testing.T) { newTestPrometheusCodec(), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -135,7 +136,7 @@ func TestTripperware_InstantQuery(t *testing.T) { codec, nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -505,7 +506,7 @@ func TestTripperware_Metrics(t *testing.T) { newTestPrometheusCodec(), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -770,7 +771,7 @@ func TestTripperware_RemoteRead(t *testing.T) { newTestPrometheusCodec(), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -905,7 +906,7 @@ func TestTripperware_ShouldSupportReadConsistencyOffsetsInjection(t *testing.T) NewPrometheusCodec(nil, 0, formatJSON, nil), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, diff --git 
a/pkg/frontend/querymiddleware/split_and_cache_test.go b/pkg/frontend/querymiddleware/split_and_cache_test.go index f1676c4da1c..4bb691f6a93 100644 --- a/pkg/frontend/querymiddleware/split_and_cache_test.go +++ b/pkg/frontend/querymiddleware/split_and_cache_test.go @@ -1753,6 +1753,7 @@ func TestSplitQueryByInterval(t *testing.T) { queryFooSubqueryAtStartExpr, _ := parser.ParseExpr(queryFooSubqueryAtStart) queryFooSubqueryAtZero := "sum_over_time(foo[1d:] @ 0.000)" queryFooSubqueryAtZeroExpr, _ := parser.ParseExpr(queryFooSubqueryAtZero) + lookbackDelta := 5 * time.Minute for i, tc := range []struct { input MetricsQueryRequest @@ -1760,160 +1761,160 @@ func TestSplitQueryByInterval(t *testing.T) { interval time.Duration }{ { - input: &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, maxT: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, minT: -lookbackDelta.Milliseconds() + 1, maxT: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, maxT: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 0, end: 60 * 60 * seconds, minT: -lookbackDelta.Milliseconds() + 1, maxT: 60 * 60 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: 3 * time.Hour, }, { - input: &PrometheusRangeQueryRequest{start: 0, end: 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: 0, end: 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 0, end: 24 * 3600 * seconds, maxT: 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 0, end: 24 * 3600 * seconds, minT: -lookbackDelta.Milliseconds() + 1, maxT: 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: 0, end: 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: 0, end: 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 0, end: 3 * 3600 * seconds, maxT: 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 0, end: 3 * 3600 * seconds, minT: -lookbackDelta.Milliseconds() + 1, maxT: 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: 3 * time.Hour, }, { - input: &PrometheusRangeQueryRequest{start: 0, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: 
queryFooAtStartExpr}, + input: &PrometheusRangeQueryRequest{start: 0, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooAtStartExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 0, end: (24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooAtZeroExpr}, - &PrometheusRangeQueryRequest{start: 24 * 3600 * seconds, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooAtZeroExpr}, + &PrometheusRangeQueryRequest{start: 0, end: (24 * 3600 * seconds) - (15 * seconds), minT: -lookbackDelta.Milliseconds() + 1, step: 15 * seconds, queryExpr: queryFooAtZeroExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: 24 * 3600 * seconds, end: 2 * 24 * 3600 * seconds, minT: -lookbackDelta.Milliseconds() + 1, step: 15 * seconds, queryExpr: queryFooAtZeroExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{minT: -(24 * 3600 * seconds), start: 0, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooSubqueryAtStartExpr}, + input: &PrometheusRangeQueryRequest{minT: -(24 * 3600 * seconds), start: 0, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooSubqueryAtStartExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{minT: -(24 * 3600 * seconds), start: 0, end: (24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooSubqueryAtZeroExpr}, - &PrometheusRangeQueryRequest{minT: -(24 * 3600 * seconds), start: 24 * 3600 * seconds, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooSubqueryAtZeroExpr}, + &PrometheusRangeQueryRequest{minT: -(24 * 3600 * seconds) - lookbackDelta.Milliseconds() + 1, start: 0, end: (24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooSubqueryAtZeroExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{minT: -(24 * 3600 * seconds) - lookbackDelta.Milliseconds() + 1, start: 24 * 3600 * seconds, end: 2 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooSubqueryAtZeroExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: 0, end: 2 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: 0, end: 2 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 0, end: (3 * 3600 * seconds) - (15 * seconds), maxT: (3 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, end: 2 * 3 * 3600 * seconds, minT: 3 * 3600 * seconds, maxT: 2 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 0, end: (3 * 3600 * seconds) - (15 * seconds), minT: -lookbackDelta.Milliseconds() + 1, maxT: (3 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, end: 2 * 3 * 3600 * seconds, minT: 3*3600*seconds - lookbackDelta.Milliseconds() + 1, maxT: 2 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: 3 * time.Hour, }, { - input: &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, end: 3 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + 
input: &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, end: 3 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, minT: 3 * 3600 * seconds, end: (24 * 3600 * seconds) - (15 * seconds), maxT: (24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: 24 * 3600 * seconds, minT: 24 * 3600 * seconds, end: (2 * 24 * 3600 * seconds) - (15 * seconds), maxT: (2 * 24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: 2 * 24 * 3600 * seconds, minT: 2 * 24 * 3600 * seconds, end: 3 * 24 * 3600 * seconds, maxT: 3 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, minT: 3*3600*seconds - lookbackDelta.Milliseconds() + 1, end: (24 * 3600 * seconds) - (15 * seconds), maxT: (24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: 24 * 3600 * seconds, minT: 24*3600*seconds - lookbackDelta.Milliseconds() + 1, end: (2 * 24 * 3600 * seconds) - (15 * seconds), maxT: (2 * 24 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: 2 * 24 * 3600 * seconds, minT: 2*24*3600*seconds - lookbackDelta.Milliseconds() + 1, end: 3 * 24 * 3600 * seconds, maxT: 3 * 24 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: 2 * 3600 * seconds, end: 3 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: 2 * 3600 * seconds, end: 3 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: 2 * 3600 * seconds, end: (3 * 3600 * seconds) - (15 * seconds), minT: 2 * 3600 * seconds, maxT: (3 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, end: (2 * 3 * 3600 * seconds) - (15 * seconds), minT: 3 * 3600 * seconds, maxT: (2 * 3 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: 2 * 3 * 3600 * seconds, end: 3 * 3 * 3600 * seconds, minT: 2 * 3 * 3600 * seconds, maxT: 3 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: 2 * 3600 * seconds, end: (3 * 3600 * seconds) - (15 * seconds), minT: 2*3600*seconds - lookbackDelta.Milliseconds() + 1, maxT: (3 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: 3 * 3600 * seconds, end: (2 * 3 * 3600 * seconds) - (15 * seconds), minT: 3*3600*seconds - lookbackDelta.Milliseconds() + 1, maxT: (2 * 3 * 3600 * seconds) - (15 * seconds), step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: 2 * 3 * 3600 * seconds, end: 3 * 3 * 3600 * seconds, minT: 2*3*3600*seconds - lookbackDelta.Milliseconds() + 1, maxT: 3 * 3 * 3600 * seconds, step: 15 * seconds, queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: 3 * time.Hour, }, { 
- input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-15T00:03:00Z"), step: 5 * time.Minute.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-15T00:03:00Z"), step: 5 * time.Minute.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-15T00:03:00Z"), minT: timeToMillis(t, "2021-10-14T23:48:00Z"), maxT: timeToMillis(t, "2021-10-15T00:03:00Z"), step: 5 * time.Minute.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-15T00:03:00Z"), minT: timeToMillis(t, "2021-10-14T23:48:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T00:03:00Z"), step: 5 * time.Minute.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-15T00:00:00Z"), step: 6 * time.Minute.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-15T00:00:00Z"), step: 6 * time.Minute.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-14T23:54:00Z"), minT: timeToMillis(t, "2021-10-14T23:48:00Z"), maxT: timeToMillis(t, "2021-10-14T23:54:00Z"), step: 6 * time.Minute.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T00:00:00Z"), end: timeToMillis(t, "2021-10-15T00:00:00Z"), minT: timeToMillis(t, "2021-10-15T00:00:00Z"), maxT: timeToMillis(t, "2021-10-15T00:00:00Z"), step: 6 * time.Minute.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T23:48:00Z"), end: timeToMillis(t, "2021-10-14T23:54:00Z"), minT: timeToMillis(t, "2021-10-14T23:48:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-14T23:54:00Z"), step: 6 * time.Minute.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T00:00:00Z"), end: timeToMillis(t, "2021-10-15T00:00:00Z"), minT: timeToMillis(t, "2021-10-15T00:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T00:00:00Z"), step: 6 * time.Minute.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T22:00:00Z"), end: timeToMillis(t, "2021-10-17T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T22:00:00Z"), end: timeToMillis(t, "2021-10-17T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T22:00:00Z"), end: timeToMillis(t, "2021-10-14T22:00:00Z"), minT: timeToMillis(t, "2021-10-14T22:00:00Z"), maxT: timeToMillis(t, "2021-10-14T22:00:00Z"), step: 24 * 
time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T22:00:00Z"), end: timeToMillis(t, "2021-10-15T22:00:00Z"), minT: timeToMillis(t, "2021-10-15T22:00:00Z"), maxT: timeToMillis(t, "2021-10-15T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T22:00:00Z"), end: timeToMillis(t, "2021-10-16T22:00:00Z"), minT: timeToMillis(t, "2021-10-16T22:00:00Z"), maxT: timeToMillis(t, "2021-10-16T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T22:00:00Z"), end: timeToMillis(t, "2021-10-17T22:00:00Z"), minT: timeToMillis(t, "2021-10-17T22:00:00Z"), maxT: timeToMillis(t, "2021-10-17T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-14T22:00:00Z"), end: timeToMillis(t, "2021-10-14T22:00:00Z"), minT: timeToMillis(t, "2021-10-14T22:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-14T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T22:00:00Z"), end: timeToMillis(t, "2021-10-15T22:00:00Z"), minT: timeToMillis(t, "2021-10-15T22:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T22:00:00Z"), end: timeToMillis(t, "2021-10-16T22:00:00Z"), minT: timeToMillis(t, "2021-10-16T22:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-16T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T22:00:00Z"), end: timeToMillis(t, "2021-10-17T22:00:00Z"), minT: timeToMillis(t, "2021-10-17T22:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T22:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T00:00:00Z"), end: timeToMillis(t, "2021-10-18T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T00:00:00Z"), end: timeToMillis(t, "2021-10-18T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T00:00:00Z"), end: timeToMillis(t, "2021-10-15T00:00:00Z"), minT: timeToMillis(t, "2021-10-15T00:00:00Z"), maxT: timeToMillis(t, "2021-10-15T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T00:00:00Z"), end: timeToMillis(t, "2021-10-16T00:00:00Z"), minT: timeToMillis(t, "2021-10-16T00:00:00Z"), maxT: timeToMillis(t, "2021-10-16T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T00:00:00Z"), end: timeToMillis(t, "2021-10-17T00:00:00Z"), minT: timeToMillis(t, "2021-10-17T00:00:00Z"), maxT: timeToMillis(t, "2021-10-17T00:00:00Z"), step: 24 
* time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-18T00:00:00Z"), end: timeToMillis(t, "2021-10-18T00:00:00Z"), minT: timeToMillis(t, "2021-10-18T00:00:00Z"), maxT: timeToMillis(t, "2021-10-18T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T00:00:00Z"), end: timeToMillis(t, "2021-10-15T00:00:00Z"), minT: timeToMillis(t, "2021-10-15T00:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T00:00:00Z"), end: timeToMillis(t, "2021-10-16T00:00:00Z"), minT: timeToMillis(t, "2021-10-16T00:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-16T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T00:00:00Z"), end: timeToMillis(t, "2021-10-17T00:00:00Z"), minT: timeToMillis(t, "2021-10-17T00:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-18T00:00:00Z"), end: timeToMillis(t, "2021-10-18T00:00:00Z"), minT: timeToMillis(t, "2021-10-18T00:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-18T00:00:00Z"), step: 24 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T22:00:00Z"), end: timeToMillis(t, "2021-10-22T04:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T22:00:00Z"), end: timeToMillis(t, "2021-10-22T04:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T22:00:00Z"), end: timeToMillis(t, "2021-10-15T22:00:00Z"), minT: timeToMillis(t, "2021-10-15T22:00:00Z"), maxT: timeToMillis(t, "2021-10-15T22:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T04:00:00Z"), end: timeToMillis(t, "2021-10-17T04:00:00Z"), minT: timeToMillis(t, "2021-10-17T04:00:00Z"), maxT: timeToMillis(t, "2021-10-17T04:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-18T10:00:00Z"), end: timeToMillis(t, "2021-10-18T10:00:00Z"), minT: timeToMillis(t, "2021-10-18T10:00:00Z"), maxT: timeToMillis(t, "2021-10-18T10:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-19T16:00:00Z"), end: timeToMillis(t, "2021-10-19T16:00:00Z"), minT: timeToMillis(t, "2021-10-19T16:00:00Z"), maxT: timeToMillis(t, "2021-10-19T16:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-20T22:00:00Z"), end: timeToMillis(t, "2021-10-20T22:00:00Z"), minT: timeToMillis(t, "2021-10-20T22:00:00Z"), maxT: timeToMillis(t, "2021-10-20T22:00:00Z"), step: 
30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-22T04:00:00Z"), end: timeToMillis(t, "2021-10-22T04:00:00Z"), minT: timeToMillis(t, "2021-10-22T04:00:00Z"), maxT: timeToMillis(t, "2021-10-22T04:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T22:00:00Z"), end: timeToMillis(t, "2021-10-15T22:00:00Z"), minT: timeToMillis(t, "2021-10-15T22:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T22:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T04:00:00Z"), end: timeToMillis(t, "2021-10-17T04:00:00Z"), minT: timeToMillis(t, "2021-10-17T04:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T04:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-18T10:00:00Z"), end: timeToMillis(t, "2021-10-18T10:00:00Z"), minT: timeToMillis(t, "2021-10-18T10:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-18T10:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-19T16:00:00Z"), end: timeToMillis(t, "2021-10-19T16:00:00Z"), minT: timeToMillis(t, "2021-10-19T16:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-19T16:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-20T22:00:00Z"), end: timeToMillis(t, "2021-10-20T22:00:00Z"), minT: timeToMillis(t, "2021-10-20T22:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-20T22:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-22T04:00:00Z"), end: timeToMillis(t, "2021-10-22T04:00:00Z"), minT: timeToMillis(t, "2021-10-22T04:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-22T04:00:00Z"), step: 30 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T14:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T14:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T18:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z"), maxT: timeToMillis(t, "2021-10-15T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T06:00:00Z"), end: timeToMillis(t, "2021-10-16T18:00:00Z"), minT: timeToMillis(t, "2021-10-16T06:00:00Z"), maxT: timeToMillis(t, "2021-10-16T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T06:00:00Z"), end: 
timeToMillis(t, "2021-10-17T14:00:00Z"), minT: timeToMillis(t, "2021-10-17T06:00:00Z"), maxT: timeToMillis(t, "2021-10-17T14:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T18:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T06:00:00Z"), end: timeToMillis(t, "2021-10-16T18:00:00Z"), minT: timeToMillis(t, "2021-10-16T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-16T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T06:00:00Z"), end: timeToMillis(t, "2021-10-17T14:00:00Z"), minT: timeToMillis(t, "2021-10-17T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T14:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T18:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z"), maxT: timeToMillis(t, "2021-10-15T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T06:00:00Z"), end: timeToMillis(t, "2021-10-16T18:00:00Z"), minT: timeToMillis(t, "2021-10-16T06:00:00Z"), maxT: timeToMillis(t, "2021-10-16T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T06:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), minT: timeToMillis(t, "2021-10-17T06:00:00Z"), maxT: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T18:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T06:00:00Z"), end: timeToMillis(t, "2021-10-16T18:00:00Z"), minT: timeToMillis(t, "2021-10-16T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-16T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T06:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), minT: timeToMillis(t, "2021-10-17T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 12 * time.Hour.Milliseconds(), 
queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T16:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z"), maxT: timeToMillis(t, "2021-10-15T16:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T02:00:00Z"), end: timeToMillis(t, "2021-10-16T22:00:00Z"), minT: timeToMillis(t, "2021-10-16T02:00:00Z"), maxT: timeToMillis(t, "2021-10-16T22:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T08:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), minT: timeToMillis(t, "2021-10-17T08:00:00Z"), maxT: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T16:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T16:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T02:00:00Z"), end: timeToMillis(t, "2021-10-16T22:00:00Z"), minT: timeToMillis(t, "2021-10-16T02:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-16T22:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T08:00:00Z"), end: timeToMillis(t, "2021-10-17T18:00:00Z"), minT: timeToMillis(t, "2021-10-17T08:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T18:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, { - input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T08:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + input: &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-17T08:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, expected: []MetricsQueryRequest{ - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T16:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z"), maxT: timeToMillis(t, "2021-10-15T16:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T02:00:00Z"), end: timeToMillis(t, "2021-10-16T22:00:00Z"), minT: timeToMillis(t, "2021-10-16T02:00:00Z"), maxT: timeToMillis(t, "2021-10-16T22:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, - &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T08:00:00Z"), end: 
timeToMillis(t, "2021-10-17T08:00:00Z"), minT: timeToMillis(t, "2021-10-17T08:00:00Z"), maxT: timeToMillis(t, "2021-10-17T08:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-15T06:00:00Z"), end: timeToMillis(t, "2021-10-15T16:00:00Z"), minT: timeToMillis(t, "2021-10-15T06:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-15T16:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-16T02:00:00Z"), end: timeToMillis(t, "2021-10-16T22:00:00Z"), minT: timeToMillis(t, "2021-10-16T02:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-16T22:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, + &PrometheusRangeQueryRequest{start: timeToMillis(t, "2021-10-17T08:00:00Z"), end: timeToMillis(t, "2021-10-17T08:00:00Z"), minT: timeToMillis(t, "2021-10-17T08:00:00Z") - lookbackDelta.Milliseconds() + 1, maxT: timeToMillis(t, "2021-10-17T08:00:00Z"), step: 10 * time.Hour.Milliseconds(), queryExpr: queryFooExpr, lookbackDelta: lookbackDelta}, }, interval: day, }, } { - t.Run(fmt.Sprintf("%d: start: %v, end: %v, step: %v", i, tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep()), func(t *testing.T) { + t.Run(fmt.Sprintf("%d: start: %v, end: %v, step: %v: %v", i, tc.input.GetStart(), tc.input.GetEnd(), tc.input.GetStep(), tc.input.GetQuery()), func(t *testing.T) { days, err := splitQueryByInterval(tc.input, tc.interval) require.NoError(t, err) require.Equal(t, tc.expected, days) diff --git a/pkg/frontend/querymiddleware/split_by_instant_interval_test.go b/pkg/frontend/querymiddleware/split_by_instant_interval_test.go index d544b24afff..4f79d7bcbc4 100644 --- a/pkg/frontend/querymiddleware/split_by_instant_interval_test.go +++ b/pkg/frontend/querymiddleware/split_by_instant_interval_test.go @@ -377,8 +377,8 @@ func TestInstantQuerySplittingCorrectness(t *testing.T) { expectedSplitQueries: 0, expectedSkippedNonSplittable: 1, }, - "holt_winters": { - query: `holt_winters(metric_counter[1m], 0.5, 0.9)`, + "double_exponential_smoothing": { + query: `double_exponential_smoothing(metric_counter[1m], 0.5, 0.9)`, expectedSplitQueries: 0, expectedSkippedNonSplittable: 1, }, diff --git a/pkg/frontend/querymiddleware/stats_test.go b/pkg/frontend/querymiddleware/stats_test.go index bb47b5b621b..7285cf5ec79 100644 --- a/pkg/frontend/querymiddleware/stats_test.go +++ b/pkg/frontend/querymiddleware/stats_test.go @@ -56,7 +56,7 @@ func Test_queryStatsMiddleware_Do(t *testing.T) { QuerierStats: &querier_stats.Stats{}, Start: start.Truncate(time.Millisecond), End: end.Truncate(time.Millisecond), - MinT: start.Truncate(time.Millisecond).Add(-5 * time.Minute), + MinT: start.Truncate(time.Millisecond).Add(-5 * time.Minute).Add(time.Millisecond), // query range is left-open, but minT is inclusive MaxT: end.Truncate(time.Millisecond), Step: step, }, @@ -90,7 +90,7 @@ func Test_queryStatsMiddleware_Do(t *testing.T) { QuerierStats: &querier_stats.Stats{}, Start: start.Truncate(time.Millisecond), End: end.Truncate(time.Millisecond), - MinT: start.Truncate(time.Millisecond).Add(-5 * time.Minute), + MinT: start.Truncate(time.Millisecond).Add(-5 * time.Minute).Add(time.Millisecond), // query range is left-open, but minT is inclusive MaxT: end.Truncate(time.Millisecond), Step: step, }, @@ -122,7 +122,7 @@ func 
Test_queryStatsMiddleware_Do(t *testing.T) { QuerierStats: &querier_stats.Stats{}, Start: start.Truncate(time.Millisecond), End: start.Truncate(time.Millisecond), - MinT: start.Truncate(time.Millisecond).Add(-5 * time.Minute), + MinT: start.Truncate(time.Millisecond).Add(-5 * time.Minute).Add(time.Millisecond), // query range is left-open, but minT is inclusive MaxT: start.Truncate(time.Millisecond), }, }, @@ -174,7 +174,7 @@ func Test_queryStatsMiddleware_Do(t *testing.T) { QuerierStats: &querier_stats.Stats{}, Start: start.Truncate(time.Millisecond).Add(-30 * time.Minute), End: end.Truncate(time.Millisecond).Add(10 * time.Minute), - MinT: start.Truncate(time.Millisecond).Add(-30 * time.Minute), + MinT: start.Truncate(time.Millisecond).Add(-30 * time.Minute).Add(time.Millisecond), // query range is left-open, but minT is inclusive MaxT: end.Truncate(time.Millisecond).Add(10 * time.Minute), }, }, @@ -217,7 +217,7 @@ func Test_queryStatsMiddleware_Do(t *testing.T) { QuerierStats: &querier_stats.Stats{}, Start: start.Truncate(time.Millisecond), End: end.Truncate(time.Millisecond).Add(10 * time.Minute), - MinT: start.Truncate(time.Millisecond), + MinT: start.Truncate(time.Millisecond).Add(time.Millisecond), // query range is left-open, but minT is inclusive MaxT: end.Truncate(time.Millisecond).Add(10 * time.Minute), }, }, diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 46a79abf4b4..5dcc43f0d5d 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -2670,7 +2670,7 @@ func (i *Ingester) createTSDB(userID string, walReplayConcurrency int) (*userTSD oooTW := i.limits.OutOfOrderTimeWindow(userID) // Create a new user database - db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ + db, err := tsdb.Open(udir, util_log.SlogFromGoKit(userLogger), tsdbPromReg, &tsdb.Options{ RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), MinBlockDuration: blockRanges[0], MaxBlockDuration: blockRanges[len(blockRanges)-1], diff --git a/pkg/ingester/ingester_early_compaction_test.go b/pkg/ingester/ingester_early_compaction_test.go index 5cf29b7de99..c12b6bb65dc 100644 --- a/pkg/ingester/ingester_early_compaction_test.go +++ b/pkg/ingester/ingester_early_compaction_test.go @@ -12,13 +12,13 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/test" "github.com/grafana/dskit/user" "github.com/oklog/ulid" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -692,7 +692,7 @@ func listBlocksInDir(t *testing.T, dir string) (ids []ulid.ULID) { } func readMetricSamplesFromBlockDir(t *testing.T, blockDir string, metricName string) (results model.Matrix) { - block, err := tsdb.OpenBlock(log.NewNopLogger(), blockDir, nil) + block, err := tsdb.OpenBlock(promslog.NewNopLogger(), blockDir, nil) require.NoError(t, err) defer func() { require.NoError(t, block.Close()) diff --git a/pkg/ingester/user_tsdb_test.go b/pkg/ingester/user_tsdb_test.go index 8d9d0ac8ce8..ebf024e7112 100644 --- a/pkg/ingester/user_tsdb_test.go +++ b/pkg/ingester/user_tsdb_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" @@ -198,7 +199,7 @@ func 
TestNextForcedHeadCompactionRange(t *testing.T) { } func TestGetSeriesCountAndMinLocalLimit(t *testing.T) { - tsdbDB, err := tsdb.Open(t.TempDir(), log.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) + tsdbDB, err := tsdb.Open(t.TempDir(), promslog.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tsdbDB.Close()) diff --git a/pkg/mimir/promexts.go b/pkg/mimir/promexts.go new file mode 100644 index 00000000000..1fc17671b60 --- /dev/null +++ b/pkg/mimir/promexts.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package mimir + +import ( + "github.com/prometheus/common/model" + + "github.com/grafana/mimir/pkg/util/promqlext" +) + +func init() { + promqlext.ExtendPromQL() + // Mimir doesn't support Prometheus' UTF-8 metric/label name scheme yet. + model.NameValidationScheme = model.LegacyValidation +} diff --git a/pkg/mimirtool/backfill/backfill.go b/pkg/mimirtool/backfill/backfill.go index ff92f7d774b..cf435005be8 100644 --- a/pkg/mimirtool/backfill/backfill.go +++ b/pkg/mimirtool/backfill/backfill.go @@ -15,9 +15,9 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" @@ -93,7 +93,7 @@ func CreateBlocks(input IteratorCreator, mint, maxt int64, maxSamplesInAppender for t := mint; t <= maxt; t = t + blockDuration/2 { err := func() error { - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, blockDuration) if err != nil { return errors.Wrap(err, "block writer") } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 0ff63176856..70d8ddc488f 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -58,6 +58,7 @@ import ( "github.com/grafana/mimir/pkg/util" "github.com/grafana/mimir/pkg/util/globalerror" "github.com/grafana/mimir/pkg/util/limiter" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/test" ) @@ -2830,7 +2831,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), Timeout: 10 * time.Second, MaxSamples: 1e6, }) diff --git a/pkg/querier/distributor_queryable_streaming_test.go b/pkg/querier/distributor_queryable_streaming_test.go index d2738f711c3..b79bdb883b1 100644 --- a/pkg/querier/distributor_queryable_streaming_test.go +++ b/pkg/querier/distributor_queryable_streaming_test.go @@ -56,7 +56,7 @@ func TestStreamingChunkSeries_HappyPath(t *testing.T) { require.Equal(t, 1.0, m.SumCounters("cortex_distributor_query_ingester_chunks_deduped_total")) require.Equal(t, uint64(3), queryStats.FetchedChunksCount) - require.Equal(t, uint64(114), queryStats.FetchedChunkBytes) + require.Equal(t, uint64(111), queryStats.FetchedChunkBytes) } func assertChunkIteratorsEqual(t testing.TB, c1, c2 chunkenc.Iterator) { diff --git a/pkg/querier/duplicates_test.go b/pkg/querier/duplicates_test.go index 97d8b5598d9..1b3d29e21aa 100644 --- a/pkg/querier/duplicates_test.go +++ b/pkg/querier/duplicates_test.go @@ -11,8 +11,8 @@ import ( "testing" 
"time" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -94,7 +94,7 @@ func runPromQLAndGetJSONResult(t *testing.T, query string, ts mimirpb.TimeSeries tq := &testQueryable{ts: newTimeSeriesSeriesSet([]mimirpb.TimeSeries{ts})} engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Timeout: 10 * time.Second, MaxSamples: 1e6, }) diff --git a/pkg/querier/engine/config.go b/pkg/querier/engine/config.go index 2298e018467..e9fbf896d0a 100644 --- a/pkg/querier/engine/config.go +++ b/pkg/querier/engine/config.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/mimir/pkg/streamingpromql" //lint:ignore faillint streamingpromql is fine "github.com/grafana/mimir/pkg/util/activitytracker" //lint:ignore faillint activitytracker is fine + util_log "github.com/grafana/mimir/pkg/util/log" //lint:ignore faillint log is fine ) // Config holds the PromQL engine config exposed by Mimir. @@ -58,7 +59,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // to indicate whether the experimental PromQL functions should be enabled. func NewPromQLEngineOptions(cfg Config, activityTracker *activitytracker.ActivityTracker, logger log.Logger, reg prometheus.Registerer) (promql.EngineOpts, streamingpromql.EngineOpts, bool) { commonOpts := promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), Reg: reg, ActiveQueryTracker: newQueryTracker(activityTracker), MaxSamples: cfg.MaxSamples, diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index 2a580b9b0a2..9c28ce9b025 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -13,12 +13,12 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/user" "github.com/grafana/regexp" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" @@ -135,7 +135,7 @@ func TestApiStatusCodes(t *testing.T) { func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, ActiveQueryTracker: nil, MaxSamples: 100, @@ -157,13 +157,15 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { nil, // Only needed for admin APIs. "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. false, // Disable admin APIs. - log.NewNopLogger(), + promslog.NewNopLogger(), func(context.Context) v1.RulesRetriever { return &DummyRulesRetriever{} }, 0, 0, 0, // Remote read samples and concurrency limit. false, // Not an agent. 
regexp.MustCompile(".*"), func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, &v1.PrometheusVersion{}, + nil, + nil, prometheus.DefaultGatherer, nil, nil, diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 9f9a9f5ea5c..c3ded9fd672 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/dskit/user" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" @@ -37,6 +38,7 @@ import ( "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storage/chunk" "github.com/grafana/mimir/pkg/util" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/spanlogger" "github.com/grafana/mimir/pkg/util/test" "github.com/grafana/mimir/pkg/util/validation" @@ -315,7 +317,7 @@ func TestQuerier_QueryableReturnsChunksOutsideQueriedRange(t *testing.T) { require.NoError(t, err) engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), MaxSamples: 1e6, Timeout: 1 * time.Minute, }) @@ -402,7 +404,7 @@ func TestBatchMergeChunks(t *testing.T) { nil) engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), MaxSamples: 1e6, Timeout: 1 * time.Minute, }) @@ -474,7 +476,7 @@ func BenchmarkQueryExecute(b *testing.B) { nil) engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), MaxSamples: 1e6, Timeout: 1 * time.Minute, }) @@ -618,10 +620,10 @@ func TestQuerier_QueryIngestersWithinConfig(t *testing.T) { } dir := t.TempDir() - queryTracker := promql.NewActiveQueryTracker(dir, 10, log.NewNopLogger()) + queryTracker := promql.NewActiveQueryTracker(dir, 10, promslog.NewNopLogger()) engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, @@ -690,7 +692,7 @@ func TestQuerier_ValidateQueryTimeRange(t *testing.T) { } engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), MaxSamples: 1e6, Timeout: 1 * time.Minute, LookbackDelta: engineLookbackDelta, @@ -756,13 +758,13 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) { query: "rate(foo[31d])", queryStartTime: time.Now().Add(-time.Hour), queryEndTime: time.Now(), - expected: errors.Errorf("expanding series: %s", NewMaxQueryLengthError(745*time.Hour, 720*time.Hour)), + expected: errors.Errorf("expanding series: %s", NewMaxQueryLengthError(745*time.Hour-time.Millisecond, 720*time.Hour)), }, "should forbid query on large time range over the limit and short rate time window": { query: "rate(foo[1m])", queryStartTime: time.Now().Add(-maxQueryLength).Add(-time.Hour), queryEndTime: time.Now(), - expected: errors.Errorf("expanding series: %s", NewMaxQueryLengthError((721*time.Hour)+time.Minute, 720*time.Hour)), + expected: errors.Errorf("expanding series: %s", NewMaxQueryLengthError((721*time.Hour)+time.Minute-time.Millisecond, 720*time.Hour)), }, } @@ -783,7 +785,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) { // Create the PromQL engine to execute the query. 
engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: nil, MaxSamples: 1e6, Timeout: 1 * time.Minute, @@ -876,7 +878,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { logger := log.NewNopLogger() // Create the PromQL engine to execute the queries. engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), ActiveQueryTracker: nil, MaxSamples: 1e6, LookbackDelta: engineLookbackDelta, @@ -1124,11 +1126,11 @@ func TestQuerier_ValidateQueryTimeRange_MaxLabelsQueryRange(t *testing.T) { func testRangeQuery(t testing.TB, queryable storage.Queryable, end model.Time, q query) *promql.Result { dir := t.TempDir() - queryTracker := promql.NewActiveQueryTracker(dir, 10, log.NewNopLogger()) + queryTracker := promql.NewActiveQueryTracker(dir, 10, promslog.NewNopLogger()) from, through, step := time.Unix(0, 0), end.Time(), q.step engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, @@ -1278,10 +1280,10 @@ func TestQuerier_QueryStoreAfterConfig(t *testing.T) { } dir := t.TempDir() - queryTracker := promql.NewActiveQueryTracker(dir, 10, log.NewNopLogger()) + queryTracker := promql.NewActiveQueryTracker(dir, 10, promslog.NewNopLogger()) engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c7050ac4493..b27e60d47c3 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -50,6 +50,9 @@ type PusherAppender struct { userID string } +func (a *PusherAppender) SetOptions(*storage.AppendOptions) { +} + func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.labels = append(a.labels, mimirpb.FromLabelsToLabelAdapters(l)) a.samples = append(a.samples, mimirpb.Sample{ @@ -83,6 +86,10 @@ func (a *PusherAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels return 0, errors.New("CT zero samples are unsupported") } +func (a *PusherAppender) AppendHistogramCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, errors.New("CT zero samples are unsupported") +} + func (a *PusherAppender) Commit() error { a.totalWrites.Inc() @@ -143,6 +150,9 @@ func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { type NoopAppender struct{} +func (a *NoopAppender) SetOptions(*storage.AppendOptions) { +} + func (a *NoopAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { return 0, nil } @@ -163,6 +173,10 @@ func (a *NoopAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, return 0, errors.New("CT zero samples are unsupported") } +func (a *NoopAppender) AppendHistogramCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, errors.New("CT zero samples are unsupported") +} + func (a *NoopAppender) Commit() error { return nil } @@ -375,7 +389,7 @@ func DefaultTenantManagerFactory( GroupEvaluationContextFunc: FederatedGroupContextFunc, ExternalURL: cfg.ExternalURL.URL, NotifyFunc: rules.SendAlerts(notifier, 
cfg.ExternalURL.String()), - Logger: log.With(logger, "component", "ruler", "insight", true, "user", userID), + Logger: util_log.SlogFromGoKit(log.With(logger, "component", "ruler", "insight", true, "user", userID)), Registerer: reg, OutageTolerance: cfg.OutageTolerance, ForGracePeriod: cfg.ForGracePeriod, diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index da2327819b7..35be76d9a56 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" @@ -41,6 +42,7 @@ import ( "github.com/grafana/mimir/pkg/querier/api" "github.com/grafana/mimir/pkg/ruler/rulespb" "github.com/grafana/mimir/pkg/storage/series" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/test" ) @@ -492,11 +494,11 @@ func TestDefaultManagerFactory_CorrectQueryableUsed(t *testing.T) { // setup cfg := defaultRulerConfig(t) options := applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager := notifier.NewManager(&notifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) + notifierManager := notifier.NewManager(&notifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) ruleFiles := writeRuleGroupToFiles(t, cfg.RulePath, options.logger, userID, tc.ruleGroup) regularQueryable, federatedQueryable := newMockQueryable(), newMockQueryable() - tracker := promql.NewActiveQueryTracker(t.TempDir(), 20, log.NewNopLogger()) + tracker := promql.NewActiveQueryTracker(t.TempDir(), 20, promslog.NewNopLogger()) eng := promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, @@ -560,10 +562,10 @@ func TestDefaultManagerFactory_ShouldNotWriteRecordingRuleResultsWhenDisabled(t var ( options = applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager = notifier.NewManager(&notifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) + notifierManager = notifier.NewManager(&notifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) ruleFiles = writeRuleGroupToFiles(t, cfg.RulePath, options.logger, userID, ruleGroup) queryable = newMockQueryable() - tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, log.NewNopLogger()) + tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, util_log.SlogFromGoKit(log.NewNopLogger())) eng = promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, @@ -648,8 +650,8 @@ func TestDefaultManagerFactory_ShouldInjectReadConsistencyToContextBasedOnRuleDe var ( cfg = defaultRulerConfig(t) options = applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager = notifier.NewManager(&notifier.Options{Do: func(_
context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) + tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, util_log.SlogFromGoKit(options.logger)) eng = promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, @@ -727,8 +729,8 @@ func TestDefaultManagerFactory_ShouldInjectStrongReadConsistencyToContextWhenQue var ( options = applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager = notifier.NewManager(&notifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) - tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, options.logger) + notifierManager = notifier.NewManager(&notifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) + tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, util_log.SlogFromGoKit(options.logger)) eng = promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, diff --git a/pkg/ruler/notifier.go b/pkg/ruler/notifier.go index 0fd0175b1cd..348aa7b9067 100644 --- a/pkg/ruler/notifier.go +++ b/pkg/ruler/notifier.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/prometheus/notifier" "github.com/grafana/mimir/pkg/util" + util_log "github.com/grafana/mimir/pkg/util/log" ) var ( @@ -84,10 +85,11 @@ func newRulerNotifier(o *notifier.Options, l gklog.Logger) (*rulerNotifier, erro if err != nil { return nil, err } + sl := util_log.SlogFromGoKit(l) return &rulerNotifier{ - notifier: notifier.NewManager(o, l), + notifier: notifier.NewManager(o, sl), sdCancel: sdCancel, - sdManager: discovery.NewManager(sdCtx, l, o.Registerer, sdMetrics), + sdManager: discovery.NewManager(sdCtx, sl, o.Registerer, sdMetrics), logger: l, }, nil } diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index ff52af87bf1..390d147295e 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -38,7 +38,6 @@ import ( "github.com/prometheus/prometheus/rules" promRules "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" @@ -141,7 +140,7 @@ type prepareOptions struct { } func applyPrepareOptions(t *testing.T, instanceID string, opts ...prepareOption) prepareOptions { - defaultLogger := testutil.NewLogger(t) + defaultLogger := log.Logger(utiltest.NewTestingLogger(t)) defaultLogger = log.With(defaultLogger, "instance", instanceID) defaultLogger = level.NewFilter(defaultLogger, level.AllowInfo()) diff --git a/pkg/storage/ingest/reader_test.go b/pkg/storage/ingest/reader_test.go index 2eb6300a99d..2243683ab2d 100644 --- a/pkg/storage/ingest/reader_test.go +++ b/pkg/storage/ingest/reader_test.go @@ -21,7 +21,6 @@ import ( "github.com/grafana/dskit/test" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/twmb/franz-go/pkg/kadm" @@ -319,7 +318,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter
cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 0 @@ -369,7 +368,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 1 @@ -422,7 +421,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 1 @@ -459,7 +458,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 0 @@ -2473,7 +2472,7 @@ func TestPartitionCommitter(t *testing.T) { return res, nil, true }) - logger := testutil.NewLogger(t) + logger := mimirtest.NewTestingLogger(t) cfg := createTestKafkaConfig(clusterAddr, topicName) client, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, logger)...) require.NoError(t, err) @@ -2614,7 +2613,7 @@ func TestPartitionCommitter_commit(t *testing.T) { func newKafkaProduceClient(t *testing.T, addrs string) *kgo.Client { writeClient, err := kgo.NewClient( kgo.SeedBrokers(addrs), - kgo.WithLogger(NewKafkaLogger(testutil.NewLogger(t))), + kgo.WithLogger(NewKafkaLogger(mimirtest.NewTestingLogger(t))), // We will choose the partition of each record. 
kgo.RecordPartitioner(kgo.ManualPartitioner()), ) diff --git a/pkg/storage/tsdb/block/block_generator.go b/pkg/storage/tsdb/block/block_generator.go index d88ab990c4e..752bbdb3b8a 100644 --- a/pkg/storage/tsdb/block/block_generator.go +++ b/pkg/storage/tsdb/block/block_generator.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/dskit/runutil" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -273,7 +274,7 @@ func CreateBlock( if err := g.Wait(); err != nil { return id, err } - c, err := tsdb.NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{maxt - mint}, nil, nil) + c, err := tsdb.NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{maxt - mint}, nil, nil) if err != nil { return id, errors.Wrap(err, "create compactor") } diff --git a/pkg/storage/tsdb/block/index.go b/pkg/storage/tsdb/block/index.go index ddc08435ed9..c81c99f230f 100644 --- a/pkg/storage/tsdb/block/index.go +++ b/pkg/storage/tsdb/block/index.go @@ -29,6 +29,8 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" "golang.org/x/exp/slices" + + util_log "github.com/grafana/mimir/pkg/util/log" ) // VerifyBlock does a full run over a block index and chunk data and verifies that they fulfill the order invariants. @@ -421,7 +423,7 @@ func Repair(ctx context.Context, logger log.Logger, dir string, id ulid.ULID, so return resid, errors.New("cannot repair downsampled block") } - b, err := tsdb.OpenBlock(logger, bdir, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), bdir, nil) if err != nil { return resid, errors.Wrap(err, "open block") } diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 8647a093e10..f74a637070f 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/tsdb" @@ -651,7 +652,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st // then it will be snapshotted to the storage directory. 
tmpDir := t.TempDir() - db, err := tsdb.Open(tmpDir, log.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) + db, err := tsdb.Open(tmpDir, promslog.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 98d658d38ef..9b3bb195bb6 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -37,6 +37,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -1149,7 +1150,7 @@ func appendTestSeries(series int) func(testing.TB, func() storage.Appender) { func createBlockFromHead(t testing.TB, dir string, head *tsdb.Head) ulid.ULID { // Put a 3 MiB limit on segment files so we can test with many segment files without creating too big blocks. - compactor, err := tsdb.NewLeveledCompactorWithChunkSize(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, 3*1024*1024, nil) + compactor, err := tsdb.NewLeveledCompactorWithChunkSize(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, 3*1024*1024, nil) assert.NoError(t, err) assert.NoError(t, os.MkdirAll(dir, 0777)) diff --git a/pkg/storegateway/prometheus_test.go b/pkg/storegateway/prometheus_test.go index e8a225ec215..ac3f5db2653 100644 --- a/pkg/storegateway/prometheus_test.go +++ b/pkg/storegateway/prometheus_test.go @@ -6,7 +6,7 @@ import ( "context" "testing" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" promtsdb "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" @@ -18,7 +18,7 @@ import ( ) func openPromBlocks(t testing.TB, dir string) []promtsdb.BlockReader { - promDB, err := promtsdb.OpenDBReadOnly(dir, "", log.NewNopLogger()) + promDB, err := promtsdb.OpenDBReadOnly(dir, "", promslog.NewNopLogger()) require.NoError(t, err) promBlocks, err := promDB.Blocks() require.NoError(t, err) diff --git a/pkg/streamingpromql/engine_test.go b/pkg/streamingpromql/engine_test.go index fa4a3555ccd..d9cafe90bbe 100644 --- a/pkg/streamingpromql/engine_test.go +++ b/pkg/streamingpromql/engine_test.go @@ -364,7 +364,7 @@ func TestRangeVectorSelectors(t *testing.T) { ts time.Time }{ "matches series with points in range": { - expr: "some_metric[1m]", + expr: "some_metric[1m1s]", ts: baseT.Add(2 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -400,7 +400,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "does not return points outside range if last selected point does not align to end of range": { - expr: "some_metric_with_gaps[1m]", + expr: "some_metric_with_gaps[1m1s]", ts: baseT.Add(2 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -414,7 +414,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "metric with stale marker": { - expr: "some_metric_with_stale_marker[3m]", + expr: "some_metric_with_stale_marker[3m1s]", ts: baseT.Add(3 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -430,7 +430,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "histogram: matches series with points in range": { - expr: "incr_histogram[1m]", + expr: "incr_histogram[1m1s]", ts: baseT.Add(2 * 
time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -523,7 +523,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "histogram: does not return points outside range if last selected point does not align to end of range": { - expr: "histogram_with_gaps[1m]", + expr: "histogram_with_gaps[1m1s]", ts: baseT.Add(2 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -553,7 +553,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "histogram: metric with stale marker": { - expr: "histogram_with_stale_marker[3m]", + expr: "histogram_with_stale_marker[3m1s]", ts: baseT.Add(3 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -676,7 +676,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, "mixed series with a float then a histogram": { // No incorrect lookback - expr: "mixed_metric_float_first[2m]", + expr: "mixed_metric_float_first[2m1s]", ts: baseT.Add(2 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -711,7 +711,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "selector with positive offset (looking backwards)": { - expr: "some_metric[1m] offset 1m", + expr: "some_metric[1m1s] offset 1m", ts: baseT.Add(3 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -733,7 +733,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "selector with negative offset (looking forwards)": { - expr: "some_metric[1m] offset -1m", + expr: "some_metric[1m1s] offset -1m", ts: baseT.Add(1 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -769,7 +769,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "selector with @ modifier": { - expr: "some_metric[1m] @ 2m", + expr: "some_metric[1m1s] @ 2m", ts: baseT.Add(20 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -791,7 +791,7 @@ func TestRangeVectorSelectors(t *testing.T) { }, }, "selector with @ modifier and offset": { - expr: "some_metric[1m] @ 3m offset 1m", + expr: "some_metric[1m1s] @ 3m offset 1m", ts: baseT.Add(20 * time.Minute), expected: &promql.Result{ Value: promql.Matrix{ @@ -910,7 +910,7 @@ func TestSubqueries(t *testing.T) { }, { // A query where SeriesMetadata returns some series but evaluates to no samples should not return anything. - Query: `(metric > Inf)[20s:10s]`, + Query: `(metric{type="floats"} > Inf)[20s:10s]`, Start: time.Unix(30, 0), Result: promql.Result{ Value: promql.Matrix{}, @@ -918,7 +918,7 @@ func TestSubqueries(t *testing.T) { }, { // A nested subquery with the same properties as above. 
- Query: `last_over_time((metric > Inf)[20s:10s])[30s:5s]`, + Query: `last_over_time((metric{type="floats"} > Inf)[20s:10s])[30s:5s]`, Start: time.Unix(30, 0), Result: promql.Result{ Value: promql.Matrix{}, @@ -988,16 +988,32 @@ func TestSubqueries(t *testing.T) { }, Start: time.Unix(35, 0), }, + { + Query: "metric[20s:5s]", + Result: promql.Result{ + Value: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + Metric: labels.FromStrings("__name__", "metric", "type", "floats"), + }, + promql.Series{ + Histograms: []promql.HPoint{{H: &histogram.FloatHistogram{Count: 2}, T: 15000}, {H: &histogram.FloatHistogram{Count: 2}, T: 20000}, {H: &histogram.FloatHistogram{Count: 2}, T: 25000}, {H: &histogram.FloatHistogram{Count: 2}, T: 30000}}, + Metric: labels.FromStrings("__name__", "metric", "type", "histograms"), + }, + }, + }, + Start: time.Unix(30, 0), + }, { Query: "metric[20s:5s] offset 5s", Result: promql.Result{ Value: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 10000}, {F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, + Floats: []promql.FPoint{{F: 2, T: 15000}, {F: 2, T: 20000}, {F: 2, T: 25000}, {F: 2, T: 30000}}, Metric: labels.FromStrings("__name__", "metric", "type", "floats"), }, promql.Series{ - Histograms: []promql.HPoint{{H: &histogram.FloatHistogram{Count: 2}, T: 10000}, {H: &histogram.FloatHistogram{Count: 2}, T: 15000}, {H: &histogram.FloatHistogram{Count: 2}, T: 20000}, {H: &histogram.FloatHistogram{Count: 2}, T: 25000}, {H: &histogram.FloatHistogram{Count: 2}, T: 30000}}, + Histograms: []promql.HPoint{{H: &histogram.FloatHistogram{Count: 2}, T: 15000}, {H: &histogram.FloatHistogram{Count: 2}, T: 20000}, {H: &histogram.FloatHistogram{Count: 2}, T: 25000}, {H: &histogram.FloatHistogram{Count: 2}, T: 30000}}, Metric: labels.FromStrings("__name__", "metric", "type", "histograms"), }, }, @@ -1038,6 +1054,18 @@ func TestSubqueries(t *testing.T) { }, { // Normal selector. Query: `http_requests{group=~"pro.*",instance="0"}[30s:10s]`, + Result: promql.Result{ + Value: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 10000, T: 10000000}, {F: 100, T: 10010000}, {F: 130, T: 10020000}}, + Metric: labels.FromStrings("__name__", "http_requests", "job", "api-server", "instance", "0", "group", "production"), + }, + }, + }, + Start: time.Unix(10020, 0), + }, + { // Normal selector. Add 1ms to the range to see the legacy behavior of the previous test. 
+ Query: `http_requests{group=~"pro.*",instance="0"}[30s1ms:10s]`, Result: promql.Result{ Value: promql.Matrix{ promql.Series{ @@ -1074,6 +1102,35 @@ func TestSubqueries(t *testing.T) { }, { Query: `rate(http_requests[1m])[15s:5s]`, + Result: promql.Result{ + Value: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, + }, + promql.Series{ + Floats: []promql.FPoint{{F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, + }, + }, + Warnings: annotations.New().Add(annotations.NewPossibleNonCounterInfo("http_requests", posrange.PositionRange{Start: 5})), + }, + Start: time.Unix(8000, 0), + }, + { + Query: `rate(http_requests[1m])[15s1ms:5s]`, // Add 1ms to the range to see the legacy behavior of the previous test. Result: promql.Result{ Value: promql.Matrix{ promql.Series{ @@ -1106,15 +1163,39 @@ func TestSubqueries(t *testing.T) { Result: promql.Result{ Value: promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 270, T: 90000}, {F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, Metric: labels.EmptyLabels(), }, }, }, Start: time.Unix(120, 0), }, + { + Query: `sum(http_requests{group=~"pro.*"})[30s:10s]`, + Result: promql.Result{ + Value: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 300, T: 100000}, {F: 330, T: 110000}, {F: 360, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + }, + Start: time.Unix(121, 0), // 1s later doesn't change the result compared to above. + }, { Query: `sum(http_requests)[40s:10s]`, + Result: promql.Result{ + Value: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 900, T: 90000}, {F: 1000, T: 100000}, {F: 1100, T: 110000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + }, + Start: time.Unix(120, 0), + }, + { + Query: `sum(http_requests)[40s1ms:10s]`, // Add 1ms to the range to see the legacy behavior of the previous test. Result: promql.Result{ Value: promql.Matrix{ promql.Series{ @@ -1127,6 +1208,18 @@ func TestSubqueries(t *testing.T) { }, { Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s:5s]`, + Result: promql.Result{ + Value: promql.Matrix{ + promql.Series{ + Floats: []promql.FPoint{{F: 1000, T: 105000}, {F: 1100, T: 110000}, {F: 1100, T: 115000}, {F: 1200, T: 120000}}, + Metric: labels.EmptyLabels(), + }, + }, + }, + Start: time.Unix(120, 0), + }, + { + Query: `(sum(http_requests{group=~"p.*"})+sum(http_requests{group=~"c.*"}))[20s1ms:5s]`, // Add 1ms to the range to see the legacy behavior of the previous test. 
Result: promql.Result{ Value: promql.Matrix{ promql.Series{ @@ -1904,6 +1997,46 @@ func TestAnnotations(t *testing.T) { expr: `sum(metric{type="histogram"})`, }, + "stdvar() with only floats": { + data: mixedFloatHistogramData, + expr: `stdvar(metric{type="float"})`, + }, + "stdvar() with only native histograms": { + data: mixedFloatHistogramData, + expr: `stdvar(metric{type="histogram"})`, + expectedInfoAnnotations: []string{"PromQL info: ignored histogram in stdvar aggregation (1:8)"}, + }, + + "stddev() with only floats": { + data: mixedFloatHistogramData, + expr: `stddev(metric{type="float"})`, + }, + "stddev() with only native histograms": { + data: mixedFloatHistogramData, + expr: `stddev(metric{type="histogram"})`, + expectedInfoAnnotations: []string{"PromQL info: ignored histogram in stddev aggregation (1:8)"}, + }, + + "min() with only floats": { + data: mixedFloatHistogramData, + expr: `min(metric{type="float"})`, + }, + "min() with only native histograms": { + data: mixedFloatHistogramData, + expr: `min(metric{type="histogram"})`, + expectedInfoAnnotations: []string{"PromQL info: ignored histogram in min aggregation (1:5)"}, + }, + + "max() with only floats": { + data: mixedFloatHistogramData, + expr: `max(metric{type="float"})`, + }, + "max() with only native histograms": { + data: mixedFloatHistogramData, + expr: `max(metric{type="histogram"})`, + expectedInfoAnnotations: []string{"PromQL info: ignored histogram in max aggregation (1:5)"}, + }, + "avg() with float and native histogram at same step": { data: mixedFloatHistogramData, expr: "avg by (series) (metric)", @@ -1939,19 +2072,19 @@ func TestAnnotations(t *testing.T) { "sum_over_time() over series with both floats and histograms": { data: `some_metric 10 {{schema:0 sum:1 count:1 buckets:[1]}}`, - expr: `sum_over_time(some_metric[1m])`, + expr: `sum_over_time(some_metric[1m1s])`, expectedWarningAnnotations: []string{`PromQL warning: encountered a mix of histograms and floats for metric name "some_metric" (1:15)`}, }, "sum_over_time() over native histograms with both exponential and custom buckets": { data: nativeHistogramsWithCustomBucketsData, - expr: `sum_over_time(metric{series="mixed-exponential-custom-buckets"}[1m])`, + expr: `sum_over_time(metric{series="mixed-exponential-custom-buckets"}[1m1s])`, expectedWarningAnnotations: []string{ `PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "metric" (1:15)`, }, }, "sum_over_time() over native histograms with incompatible custom buckets": { data: nativeHistogramsWithCustomBucketsData, - expr: `sum_over_time(metric{series="incompatible-custom-buckets"}[1m])`, + expr: `sum_over_time(metric{series="incompatible-custom-buckets"}[1m1s])`, expectedWarningAnnotations: []string{ `PromQL warning: vector contains histograms with incompatible custom buckets for metric name "metric" (1:15)`, }, @@ -1959,19 +2092,19 @@ func TestAnnotations(t *testing.T) { "avg_over_time() over series with both floats and histograms": { data: `some_metric 10 {{schema:0 sum:1 count:1 buckets:[1]}}`, - expr: `avg_over_time(some_metric[1m])`, + expr: `avg_over_time(some_metric[1m1s])`, expectedWarningAnnotations: []string{`PromQL warning: encountered a mix of histograms and floats for metric name "some_metric" (1:15)`}, }, "avg_over_time() over native histograms with both exponential and custom buckets": { data: nativeHistogramsWithCustomBucketsData, - expr: `avg_over_time(metric{series="mixed-exponential-custom-buckets"}[1m])`, + expr: 
`avg_over_time(metric{series="mixed-exponential-custom-buckets"}[1m1s])`, expectedWarningAnnotations: []string{ `PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "metric" (1:15)`, }, }, "avg_over_time() over native histograms with incompatible custom buckets": { data: nativeHistogramsWithCustomBucketsData, - expr: `avg_over_time(metric{series="incompatible-custom-buckets"}[1m])`, + expr: `avg_over_time(metric{series="incompatible-custom-buckets"}[1m1s])`, expectedWarningAnnotations: []string{ `PromQL warning: vector contains histograms with incompatible custom buckets for metric name "metric" (1:15)`, }, @@ -1999,14 +2132,14 @@ func TestAnnotations(t *testing.T) { float_metric 10 20 other_float_metric 10 20 `, - expr: "rate(mixed_metric_count[1m]) + rate(other_mixed_metric_count[1m]) + rate(float_metric[1m]) + rate(other_float_metric[1m])", + expr: "rate(mixed_metric_count[1m1s]) + rate(other_mixed_metric_count[1m1s]) + rate(float_metric[1m1s]) + rate(other_float_metric[1m1s])", expectedWarningAnnotations: []string{ `PromQL warning: encountered a mix of histograms and floats for metric name "mixed_metric_count" (1:6)`, - `PromQL warning: encountered a mix of histograms and floats for metric name "other_mixed_metric_count" (1:37)`, + `PromQL warning: encountered a mix of histograms and floats for metric name "other_mixed_metric_count" (1:39)`, }, expectedInfoAnnotations: []string{ - `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "float_metric" (1:74)`, - `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "other_float_metric" (1:99)`, + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "float_metric" (1:78)`, + `PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "other_float_metric" (1:105)`, }, }, } @@ -2016,29 +2149,29 @@ func TestAnnotations(t *testing.T) { position := len(fmt.Sprintf("%s(", function)) + 1 testCases[fmt.Sprintf("%s() over metric without counter suffix containing only floats", function)] = testCase{ data: mixedFloatHistogramData, - expr: fmt.Sprintf(`%s(metric{type="float"}[1m])`, function), + expr: fmt.Sprintf(`%s(metric{type="float"}[1m1s])`, function), expectedInfoAnnotations: []string{fmt.Sprintf(`PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "metric" (1:%d)`, position)}, } testCases[fmt.Sprintf("%s() over metric without counter suffix containing only native histograms", function)] = testCase{ data: mixedFloatHistogramData, - expr: fmt.Sprintf(`%s(metric{type="histogram"}[1m])`, function), + expr: fmt.Sprintf(`%s(metric{type="histogram"}[1m1s])`, function), } testCases[fmt.Sprintf("%s() over metric ending in _total", function)] = testCase{ data: `some_metric_total 0+1x3`, - expr: fmt.Sprintf(`%s(some_metric_total[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric_total[1m1s])`, function), } testCases[fmt.Sprintf("%s() over metric ending in _sum", function)] = testCase{ data: `some_metric_sum 0+1x3`, - expr: fmt.Sprintf(`%s(some_metric_sum[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric_sum[1m1s])`, function), } testCases[fmt.Sprintf("%s() over metric ending in _count", function)] = testCase{ data: `some_metric_count 0+1x3`, - expr: fmt.Sprintf(`%s(some_metric_count[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric_count[1m1s])`, function), } testCases[fmt.Sprintf("%s() over 
metric ending in _bucket", function)] = testCase{ data: `some_metric_bucket 0+1x3`, - expr: fmt.Sprintf(`%s(some_metric_bucket[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric_bucket[1m1s])`, function), } testCases[fmt.Sprintf("%s() over multiple metric names", function)] = testCase{ data: ` @@ -2050,7 +2183,7 @@ func TestAnnotations(t *testing.T) { not_a_counter{env="test", series="6"} 5+1x3 also_not_a_counter{env="test", series="7"} 6+1x3 `, - expr: fmt.Sprintf(`%s({__name__!=""}[1m])`, function), + expr: fmt.Sprintf(`%s({__name__!=""}[1m1s])`, function), expectedInfoAnnotations: []string{ fmt.Sprintf(`PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "not_a_counter" (1:%d)`, position), fmt.Sprintf(`PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "also_not_a_counter" (1:%d)`, position), @@ -2058,35 +2191,35 @@ func TestAnnotations(t *testing.T) { } testCases[fmt.Sprintf("%s() over series with both floats and histograms", function)] = testCase{ data: `some_metric_count 10 {{schema:0 sum:1 count:1 buckets:[1]}}`, - expr: fmt.Sprintf(`%s(some_metric_count[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric_count[1m1s])`, function), expectedWarningAnnotations: []string{fmt.Sprintf(`PromQL warning: encountered a mix of histograms and floats for metric name "some_metric_count" (1:%d)`, position)}, } testCases[fmt.Sprintf("%s() over series with first histogram that is not a counter", function)] = testCase{ data: `some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2]}}`, - expr: fmt.Sprintf(`%s(some_metric[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric[1m1s])`, function), expectedWarningAnnotations: []string{fmt.Sprintf(`PromQL warning: this native histogram metric is not a counter: "some_metric" (1:%d)`, position)}, } testCases[fmt.Sprintf("%s() over series with last histogram that is not a counter", function)] = testCase{ data: `some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}}`, - expr: fmt.Sprintf(`%s(some_metric[1m])`, function), + expr: fmt.Sprintf(`%s(some_metric[1m1s])`, function), expectedWarningAnnotations: []string{fmt.Sprintf(`PromQL warning: this native histogram metric is not a counter: "some_metric" (1:%d)`, position)}, } testCases[fmt.Sprintf("%s() over series with a histogram that is not a counter that is neither the first or last in the range", function)] = testCase{ data: `some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3]}}`, - expr: fmt.Sprintf(`%s(some_metric[2m] @ 2m)`, function), + expr: fmt.Sprintf(`%s(some_metric[2m1s] @ 2m)`, function), expectedWarningAnnotations: []string{fmt.Sprintf(`PromQL warning: this native histogram metric is not a counter: "some_metric" (1:%d)`, position)}, } testCases[fmt.Sprintf("%s() over native histograms with both exponential and custom buckets", function)] = testCase{ data: nativeHistogramsWithCustomBucketsData, - expr: fmt.Sprintf(`%s(metric{series="mixed-exponential-custom-buckets"}[1m])`, function), + expr: fmt.Sprintf(`%s(metric{series="mixed-exponential-custom-buckets"}[1m1s])`, function), expectedWarningAnnotations: []string{ fmt.Sprintf(`PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "metric" (1:%d)`, position), }, } testCases[fmt.Sprintf("%s() 
over native histograms with incompatible custom buckets", function)] = testCase{ data: nativeHistogramsWithCustomBucketsData, - expr: fmt.Sprintf(`%s(metric{series="incompatible-custom-buckets"}[1m])`, function), + expr: fmt.Sprintf(`%s(metric{series="incompatible-custom-buckets"}[1m1s])`, function), expectedWarningAnnotations: []string{ fmt.Sprintf(`PromQL warning: vector contains histograms with incompatible custom buckets for metric name "metric" (1:%d)`, position), }, @@ -2095,7 +2228,7 @@ func TestAnnotations(t *testing.T) { data: ` series 3 1 {{schema:3 sum:12 count:7 buckets:[2 2 3]}} `, - expr: fmt.Sprintf("%s(series[45s])", function), + expr: fmt.Sprintf("%s(series[46s])", function), expectedWarningAnnotations: []string{}, expectedInfoAnnotations: []string{}, } @@ -2103,12 +2236,121 @@ func TestAnnotations(t *testing.T) { data: ` series 1 `, - expr: fmt.Sprintf("%s(series[1m])", function), + expr: fmt.Sprintf("%s(series[1m1s])", function), expectedWarningAnnotations: []string{}, expectedInfoAnnotations: []string{}, } } + binaryOperations := map[string]struct { + floatHistogramSupported bool + histogramFloatSupported bool + histogramHistogramSupported bool + supportsBool bool + }{ + "+": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: true, + }, + "-": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: true, + }, + "*": { + floatHistogramSupported: true, + histogramFloatSupported: true, + histogramHistogramSupported: false, + }, + "/": { + floatHistogramSupported: false, + histogramFloatSupported: true, + histogramHistogramSupported: false, + }, + "^": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + }, + "%": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + }, + "atan2": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + }, + "==": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: true, + supportsBool: true, + }, + "!=": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: true, + supportsBool: true, + }, + ">": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + supportsBool: true, + }, + "<": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + supportsBool: true, + }, + ">=": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + supportsBool: true, + }, + "<=": { + floatHistogramSupported: false, + histogramFloatSupported: false, + histogramHistogramSupported: false, + supportsBool: true, + }, + } + + addBinopTestCase := func(op string, name string, expr string, left string, right string, supported bool) { + testCase := testCase{ + data: mixedFloatHistogramData, + expr: expr, + } + + if !supported { + testCase.expectedInfoAnnotations = []string{fmt.Sprintf(`PromQL info: incompatible sample types encountered for binary operator "%v": %v %v %v (1:1)`, op, left, op, right)} + } + + testCases[name] = testCase + } + + for op, binop := range binaryOperations { + expressions := []string{op} + + if binop.supportsBool { + expressions = append(expressions, op+" bool") + } + + for _, expr := range expressions { + addBinopTestCase(op, 
fmt.Sprintf("binary %v between two floats", expr), fmt.Sprintf(`metric{type="float"} %v ignoring(type) metric{type="float"}`, expr), "float", "float", true) + addBinopTestCase(op, fmt.Sprintf("binary %v between a float on the left side and a histogram on the right", expr), fmt.Sprintf(`metric{type="float"} %v ignoring(type) metric{type="histogram"}`, expr), "float", "histogram", binop.floatHistogramSupported) + addBinopTestCase(op, fmt.Sprintf("binary %v between a scalar on the left side and a histogram on the right", expr), fmt.Sprintf(`2 %v metric{type="histogram"}`, expr), "float", "histogram", binop.floatHistogramSupported) + addBinopTestCase(op, fmt.Sprintf("binary %v between a histogram on the left side and a float on the right", expr), fmt.Sprintf(`metric{type="histogram"} %v ignoring(type) metric{type="float"}`, expr), "histogram", "float", binop.histogramFloatSupported) + addBinopTestCase(op, fmt.Sprintf("binary %v between a histogram on the left side and a scalar on the right", expr), fmt.Sprintf(`metric{type="histogram"} %v 2`, expr), "histogram", "float", binop.histogramFloatSupported) + addBinopTestCase(op, fmt.Sprintf("binary %v between two histograms", expr), fmt.Sprintf(`metric{type="histogram"} %v ignoring(type) metric{type="histogram"}`, expr), "histogram", "histogram", binop.histogramHistogramSupported) + } + } + opts := NewTestEngineOpts() mimirEngine, err := NewEngine(opts, NewStaticQueryLimitsProvider(0), stats.NewQueryMetrics(nil), log.NewNopLogger()) require.NoError(t, err) @@ -2278,9 +2520,20 @@ func TestCompareVariousMixedMetricsBinaryOperations(t *testing.T) { // Same thing again, this time with grouping. binaryExpr = fmt.Sprintf(`series{label="%s"}`, labels[0]) - for _, label := range labels[1:] { - binaryExpr += fmt.Sprintf(` %s on (group) series{label="%s"}`, op, label) + for i, label := range labels[1:] { + binaryExpr += fmt.Sprintf(` %s ignoring (label, group) `, op) + + if i == 0 && len(labels) > 2 { + binaryExpr += "(" + } + + binaryExpr += fmt.Sprintf(`{label="%s"}`, label) + } + + if len(labels) > 2 { + binaryExpr += ")" } + expressions = append(expressions, binaryExpr) } } diff --git a/pkg/streamingpromql/operators/aggregations/min_max.go b/pkg/streamingpromql/operators/aggregations/min_max.go index 7a3e2e20534..53d893e7f41 100644 --- a/pkg/streamingpromql/operators/aggregations/min_max.go +++ b/pkg/streamingpromql/operators/aggregations/min_max.go @@ -9,6 +9,8 @@ import ( "math" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/streamingpromql/limiting" "github.com/grafana/mimir/pkg/streamingpromql/types" @@ -19,11 +21,12 @@ type MinMaxAggregationGroup struct { floatPresent []bool accumulatePoint func(idx int64, f float64) + isMax bool } // max represents whether this aggregation is `max` (true), or `min` (false) func NewMinMaxAggregationGroup(max bool) *MinMaxAggregationGroup { - g := &MinMaxAggregationGroup{} + g := &MinMaxAggregationGroup{isMax: max} if max { g.accumulatePoint = g.maxAccumulatePoint } else { @@ -48,11 +51,21 @@ func (g *MinMaxAggregationGroup) minAccumulatePoint(idx int64, f float64) { } } -func (g *MinMaxAggregationGroup) AccumulateSeries(data types.InstantVectorSeriesData, timeRange types.QueryTimeRange, memoryConsumptionTracker *limiting.MemoryConsumptionTracker, _ types.EmitAnnotationFunc) error { - if (len(data.Floats) > 0 || len(data.Histograms) > 0) && g.floatValues == nil { - // Even if we 
only have histograms, we have to populate the float slices, as we'll treat histograms as if they have value 0. - // This is consistent with Prometheus but may not be the desired value: https://github.com/prometheus/prometheus/issues/14711 +func (g *MinMaxAggregationGroup) AccumulateSeries(data types.InstantVectorSeriesData, timeRange types.QueryTimeRange, memoryConsumptionTracker *limiting.MemoryConsumptionTracker, emitAnnotation types.EmitAnnotationFunc) error { + // Native histograms are ignored for min and max. + if len(data.Histograms) > 0 { + emitAnnotation(func(_ string, expressionPosition posrange.PositionRange) error { + name := "min" + if g.isMax { + name = "max" + } + + return annotations.NewHistogramIgnoredInAggregationInfo(name, expressionPosition) + }) + } + + if len(data.Floats) > 0 && g.floatValues == nil { var err error // First series with float values for this group, populate it. g.floatValues, err = types.Float64SlicePool.Get(timeRange.StepCount, memoryConsumptionTracker) @@ -73,13 +86,6 @@ func (g *MinMaxAggregationGroup) AccumulateSeries(data types.InstantVectorSeries g.accumulatePoint(idx, p.F) } - // If a histogram exists max treats it as 0. We have to detect this here so that we return a 0 value instead of nothing. - // This is consistent with Prometheus but may not be the desired value: https://github.com/prometheus/prometheus/issues/14711 - for _, p := range data.Histograms { - idx := timeRange.PointIndex(p.T) - g.accumulatePoint(idx, 0) - } - types.PutInstantVectorSeriesData(data, memoryConsumptionTracker) return nil } diff --git a/pkg/streamingpromql/operators/aggregations/stddev_stdvar.go b/pkg/streamingpromql/operators/aggregations/stddev_stdvar.go index 3a6e9d998b9..92308b9569b 100644 --- a/pkg/streamingpromql/operators/aggregations/stddev_stdvar.go +++ b/pkg/streamingpromql/operators/aggregations/stddev_stdvar.go @@ -9,6 +9,8 @@ import ( "math" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/streamingpromql/limiting" "github.com/grafana/mimir/pkg/streamingpromql/types" @@ -33,12 +35,23 @@ type StddevStdvarAggregationGroup struct { groupSeriesCounts []float64 } -func (g *StddevStdvarAggregationGroup) AccumulateSeries(data types.InstantVectorSeriesData, timeRange types.QueryTimeRange, memoryConsumptionTracker *limiting.MemoryConsumptionTracker, _ types.EmitAnnotationFunc) error { - var err error +func (g *StddevStdvarAggregationGroup) AccumulateSeries(data types.InstantVectorSeriesData, timeRange types.QueryTimeRange, memoryConsumptionTracker *limiting.MemoryConsumptionTracker, emitAnnotation types.EmitAnnotationFunc) error { + // Native histograms are ignored for stddev and stdvar. + if len(data.Histograms) > 0 { + emitAnnotation(func(_ string, expressionPosition posrange.PositionRange) error { + name := "stdvar" + + if g.stddev { + name = "stddev" + } + + return annotations.NewHistogramIgnoredInAggregationInfo(name, expressionPosition) + }) + } - // Native histograms are ignored for stddev if len(data.Floats) > 0 && g.floats == nil { // First series with float values for this group, populate it. 
+ var err error g.floats, err = types.Float64SlicePool.Get(timeRange.StepCount, memoryConsumptionTracker) if err != nil { return err diff --git a/pkg/streamingpromql/operators/binops/binary_operation.go b/pkg/streamingpromql/operators/binops/binary_operation.go index 39264eeadf2..7b603a5d3ae 100644 --- a/pkg/streamingpromql/operators/binops/binary_operation.go +++ b/pkg/streamingpromql/operators/binops/binary_operation.go @@ -5,8 +5,11 @@ package binops import ( "slices" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" + "github.com/prometheus/prometheus/util/annotations" "github.com/grafana/mimir/pkg/streamingpromql/limiting" "github.com/grafana/mimir/pkg/streamingpromql/types" @@ -97,3 +100,18 @@ func filterSeries(data types.InstantVectorSeriesData, mask []bool, desiredMaskVa return filteredData, nil } + +// emitIncompatibleTypesAnnotation adds an annotation to a, indicating that op encountered incompatible sample types, based on the presence of histograms on the left (lH) and right (rH) sides. +// If lH is nil, this indicates that the left side was a float, and similarly for the right side and rH. +// If lH is not nil, this indicates that the left side was a histogram, and similarly for the right side and rH. +func emitIncompatibleTypesAnnotation(a *annotations.Annotations, op parser.ItemType, lH *histogram.FloatHistogram, rH *histogram.FloatHistogram, expressionPosition posrange.PositionRange) { + a.Add(annotations.NewIncompatibleTypesInBinOpInfo(sampleTypeDescription(lH), op.String(), sampleTypeDescription(rH), expressionPosition)) +} + +func sampleTypeDescription(h *histogram.FloatHistogram) string { + if h == nil { + return "float" + } + + return "histogram" +} diff --git a/pkg/streamingpromql/operators/binops/scalar_scalar_binary_operation.go b/pkg/streamingpromql/operators/binops/scalar_scalar_binary_operation.go index 9985bcabbff..c89cb421ca7 100644 --- a/pkg/streamingpromql/operators/binops/scalar_scalar_binary_operation.go +++ b/pkg/streamingpromql/operators/binops/scalar_scalar_binary_operation.go @@ -75,7 +75,7 @@ func (s *ScalarScalarBinaryOperation) GetValues(ctx context.Context) (types.Scal for i, left := range leftValues.Samples { right := rightValues.Samples[i] - f, h, ok, err := s.opFunc(left.F, right.F, nil, nil) + f, h, ok, valid, err := s.opFunc(left.F, right.F, nil, nil) if err != nil { return types.ScalarData{}, err @@ -85,6 +85,10 @@ func (s *ScalarScalarBinaryOperation) GetValues(ctx context.Context) (types.Scal panic(fmt.Sprintf("%v binary operation between two scalars (%v and %v) did not produce a result, this should never happen", s.Op.String(), left.F, right.F)) } + if !valid { + panic(fmt.Sprintf("%v binary operation between two scalars (%v and %v) is not considered a valid operation, this should never happen", s.Op.String(), left.F, right.F)) + } + if h != nil { panic(fmt.Sprintf("%v binary operation between two scalars (%v and %v) produced a histogram result, this should never happen", s.Op.String(), left.F, right.F)) } diff --git a/pkg/streamingpromql/operators/binops/vector_scalar_binary_operation.go b/pkg/streamingpromql/operators/binops/vector_scalar_binary_operation.go index 55a9f6684b2..9b28dc5f589 100644 --- a/pkg/streamingpromql/operators/binops/vector_scalar_binary_operation.go +++ b/pkg/streamingpromql/operators/binops/vector_scalar_binary_operation.go @@ -31,12 +31,12 @@ type VectorScalarBinaryOperation struct { opFunc
vectorScalarBinaryOperationFunc expressionPosition posrange.PositionRange - emitAnnotation types.EmitAnnotationFunc + annotations *annotations.Annotations scalarData types.ScalarData vectorIterator types.InstantVectorSeriesDataIterator } -type vectorScalarBinaryOperationFunc func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) +type vectorScalarBinaryOperationFunc func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) func NewVectorScalarBinaryOperation( scalar types.ScalarOperator, @@ -70,29 +70,23 @@ func NewVectorScalarBinaryOperation( MemoryConsumptionTracker: memoryConsumptionTracker, timeRange: timeRange, + annotations: annotations, expressionPosition: expressionPosition, } - b.emitAnnotation = func(generator types.AnnotationGenerator) { - annotations.Add(generator("", expressionPosition)) - } - if !b.ScalarIsLeftSide { - b.opFunc = func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + b.opFunc = func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { return f(vectorF, scalar, vectorH, nil) } } else if op.IsComparisonOperator() && !returnBool { - b.opFunc = func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - _, _, ok, err := f(scalar, vectorF, nil, vectorH) + b.opFunc = func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + _, _, keep, valid, err := f(scalar, vectorF, nil, vectorH) // We always want to return the value from the vector when we're doing a filter-style comparison. - // - // We deliberately ignore the histogram value as we need to treat it as if it were a float with value 0, - // pending the resolution of the discussion in https://github.com/prometheus/prometheus/issues/13934#issuecomment-2372947976. - return vectorF, nil, ok, err + return vectorF, vectorH, keep, valid, err } } else { - b.opFunc = func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + b.opFunc = func(scalar float64, vectorF float64, vectorH *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { return f(scalar, vectorF, nil, vectorH) } } @@ -180,9 +174,9 @@ func (v *VectorScalarBinaryOperation) NextSeries(ctx context.Context) (types.Ins v.vectorIterator.Reset(series) for { - t, vectorF, vectorH, ok := v.vectorIterator.Next() + t, vectorF, vectorH, keep := v.vectorIterator.Next() - if !ok { + if !keep { // We are done. break } @@ -190,38 +184,48 @@ func (v *VectorScalarBinaryOperation) NextSeries(ctx context.Context) (types.Ins scalarIdx := (t - v.timeRange.StartT) / v.timeRange.IntervalMilliseconds // Scalars always have a value at every step, so we can just compute the index of the corresponding scalar value from the timestamp. scalarValue := v.scalarData.Samples[scalarIdx].F - f, h, ok, err := v.opFunc(scalarValue, vectorF, vectorH) + f, h, keep, valid, err := v.opFunc(scalarValue, vectorF, vectorH) if err != nil { err = functions.NativeHistogramErrorToAnnotation(err, v.emitAnnotation) if err == nil { // Error was converted to an annotation, continue without emitting a sample here. 
- ok = false + continue + } + + return types.InstantVectorSeriesData{}, err + } + + if !valid { + if v.ScalarIsLeftSide { + emitIncompatibleTypesAnnotation(v.annotations, v.Op, nil, vectorH, v.expressionPosition) } else { - return types.InstantVectorSeriesData{}, err + emitIncompatibleTypesAnnotation(v.annotations, v.Op, vectorH, nil, v.expressionPosition) } } - if ok { - if h != nil { - if hPoints == nil { - // First histogram for this series, get a slice for it. - if err := prepareHPointSlice(); err != nil { - return types.InstantVectorSeriesData{}, err - } - } + if !keep { + continue + } - hPoints = append(hPoints, promql.HPoint{T: t, H: h}) - } else { - // We have a float value. - if fPoints == nil { - // First float for this series, get a slice for it. - if err := prepareFPointSlice(); err != nil { - return types.InstantVectorSeriesData{}, err - } + if h != nil { + if hPoints == nil { + // First histogram for this series, get a slice for it. + if err := prepareHPointSlice(); err != nil { + return types.InstantVectorSeriesData{}, err } + } - fPoints = append(fPoints, promql.FPoint{T: t, F: f}) + hPoints = append(hPoints, promql.HPoint{T: t, H: h}) + } else { + // We have a float value. + if fPoints == nil { + // First float for this series, get a slice for it. + if err := prepareFPointSlice(); err != nil { + return types.InstantVectorSeriesData{}, err + } } + + fPoints = append(fPoints, promql.FPoint{T: t, F: f}) } } @@ -249,4 +253,8 @@ func (v *VectorScalarBinaryOperation) Close() { types.FPointSlicePool.Put(v.scalarData.Samples, v.MemoryConsumptionTracker) } +func (v *VectorScalarBinaryOperation) emitAnnotation(generator types.AnnotationGenerator) { + v.annotations.Add(generator("", v.expressionPosition)) +} + var _ types.InstantVectorOperator = &VectorScalarBinaryOperation{} diff --git a/pkg/streamingpromql/operators/binops/vector_vector_binary_operation.go b/pkg/streamingpromql/operators/binops/vector_vector_binary_operation.go index 1005046a4d8..78fdc5d4cd8 100644 --- a/pkg/streamingpromql/operators/binops/vector_vector_binary_operation.go +++ b/pkg/streamingpromql/operators/binops/vector_vector_binary_operation.go @@ -52,7 +52,7 @@ type VectorVectorBinaryOperation struct { opFunc binaryOperationFunc expressionPosition posrange.PositionRange - emitAnnotation types.EmitAnnotationFunc + annotations *annotations.Annotations } var _ types.InstantVectorOperator = &VectorVectorBinaryOperation{} @@ -97,6 +97,7 @@ func NewVectorVectorBinaryOperation( MemoryConsumptionTracker: memoryConsumptionTracker, expressionPosition: expressionPosition, + annotations: annotations, } if returnBool { @@ -109,10 +110,6 @@ func NewVectorVectorBinaryOperation( return nil, compat.NewNotSupportedError(fmt.Sprintf("binary expression with '%s'", op)) } - b.emitAnnotation = func(generator types.AnnotationGenerator) { - annotations.Add(generator("", expressionPosition)) - } - return b, nil } @@ -493,7 +490,7 @@ func (b *VectorVectorBinaryOperation) computeResult(left types.InstantVectorSeri // We also don't know if the output will be exclusively floats or histograms, so we'll use the same size slice for both. // We only assign the slices once we see the associated point type so it shouldn't be common that we allocate both. // - // FIXME: this is not safe to do for one-to-many, many-to-one or many-to-many matching, as we may need the input series for later output series. + // FIXME: this is not safe to do for one-to-many or many-to-one matching, as we may need the input series for later output series. 
canReturnLeftFPointSlice, canReturnLeftHPointSlice, canReturnRightFPointSlice, canReturnRightHPointSlice := true, true, true, true leftPoints := len(left.Floats) + len(left.Histograms) rightPoints := len(right.Floats) + len(right.Histograms) @@ -565,18 +562,24 @@ func (b *VectorVectorBinaryOperation) computeResult(left types.InstantVectorSeri // denoted by lOk or rOk being false. for lOk && rOk { if lT == rT { - // Timestamps match at this step - resultFloat, resultHist, ok, err := b.opFunc(lF, rF, lH, rH) + // We have samples on both sides at this timestep. + resultFloat, resultHist, keep, valid, err := b.opFunc(lF, rF, lH, rH) + if err != nil { err = functions.NativeHistogramErrorToAnnotation(err, b.emitAnnotation) - if err == nil { - // Error was converted to an annotation, continue without emitting a sample here. - ok = false - } else { + if err != nil { return types.InstantVectorSeriesData{}, err } + + // Else: error was converted to an annotation, continue without emitting a sample here. + keep = false } - if ok { + + if !valid { + emitIncompatibleTypesAnnotation(b.annotations, b.Op, lH, rH, b.expressionPosition) + } + + if keep { if resultHist != nil { if hPoints == nil { if err = prepareHSlice(); err != nil { @@ -600,7 +603,8 @@ func (b *VectorVectorBinaryOperation) computeResult(left types.InstantVectorSeri } } } - // Move the iterator with the lower timestamp, or both if equal + + // Advance the iterator with the lower timestamp, or both if equal if lT == rT { lT, lF, lH, lOk = b.leftIterator.Next() rT, rF, rH, rOk = b.rightIterator.Next() @@ -652,140 +656,257 @@ func (b *VectorVectorBinaryOperation) Close() { } } -type binaryOperationFunc func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) +func (b *VectorVectorBinaryOperation) emitAnnotation(generator types.AnnotationGenerator) { + b.annotations.Add(generator("", b.expressionPosition)) +} + +type binaryOperationFunc func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (f float64, h *histogram.FloatHistogram, keep bool, valid bool, err error) // FIXME(jhesketh): Investigate avoiding copying histograms for binary ops. // We would need nil-out the retained FloatHistogram instances in their original HPoint slices, to avoid them being modified when the slice is returned to the pool. 
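As a rough illustration of the convention above (a sketch, not part of this patch): each binaryOperationFunc now returns two booleans alongside the result. keep reports whether this pair of inputs produced a sample to emit, while valid reports whether the combination of sample types is a supported operation at all; when valid is false, callers emit the incompatible-types info annotation instead of a sample. A hypothetical helper spelling out how a caller interprets the five return values:

// Sketch only: this helper does not exist in the package, it just illustrates the keep/valid convention.
func describeBinaryOpResult(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) string {
	f, h, keep, valid, err := arithmeticAndComparisonOperationFuncs[op](lhs, rhs, hlhs, hrhs)
	switch {
	case err != nil:
		return "error: " + err.Error() // Real callers convert native histogram errors to annotations where possible.
	case !valid:
		return "unsupported combination of sample types: emit info annotation, no sample" // e.g. float + histogram
	case !keep:
		return "valid operation but no sample to emit" // e.g. a comparison that filtered this sample out
	case h != nil:
		return fmt.Sprintf("histogram result: %v", h)
	default:
		return fmt.Sprintf("float result: %v", f)
	}
}

For example, describeBinaryOpResult(parser.ADD, 1, 2, nil, nil) reports a float result of 3, while passing a single non-nil histogram to ADD falls into the unsupported-combination case.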
var arithmeticAndComparisonOperationFuncs = map[parser.ItemType]binaryOperationFunc{ - parser.ADD: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.ADD: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return lhs + rhs, nil, true, true, nil + } + if hlhs != nil && hrhs != nil { res, err := hlhs.Copy().Add(hrhs) if err != nil { - return 0, nil, false, err + return 0, nil, false, true, err } - return 0, res.Compact(0), true, nil + return 0, res.Compact(0), true, true, nil } - return lhs + rhs, nil, true, nil + + return 0, nil, false, false, nil }, - parser.SUB: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.SUB: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return lhs - rhs, nil, true, true, nil + } + if hlhs != nil && hrhs != nil { res, err := hlhs.Copy().Sub(hrhs) if err != nil { - return 0, nil, false, err + return 0, nil, false, true, err } - return 0, res.Compact(0), true, nil + return 0, res.Compact(0), true, true, nil } - return lhs - rhs, nil, true, nil + + return 0, nil, false, false, nil }, - parser.MUL: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.MUL: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return lhs * rhs, nil, true, true, nil + } + if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true, nil + return 0, hlhs.Copy().Mul(rhs), true, true, nil } + if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true, nil + return 0, hrhs.Copy().Mul(lhs), true, true, nil } - return lhs * rhs, nil, true, nil + + return 0, nil, false, false, nil }, - parser.DIV: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.DIV: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return lhs / rhs, nil, true, true, nil + } + if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true, nil + return 0, hlhs.Copy().Div(rhs), true, true, nil } - return lhs / rhs, nil, true, nil + + return 0, nil, false, false, nil }, - parser.POW: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - return math.Pow(lhs, rhs), nil, true, nil + parser.POW: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return math.Pow(lhs, rhs), nil, true, true, nil + } + + return 0, nil, false, false, nil }, - parser.MOD: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - return math.Mod(lhs, rhs), nil, true, nil + parser.MOD: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return math.Mod(lhs, rhs), nil, true, true, nil + } + + return 0, nil, false, false, nil }, - parser.ATAN2: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, 
bool, error) { - return math.Atan2(lhs, rhs), nil, true, nil + parser.ATAN2: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + return math.Atan2(lhs, rhs), nil, true, true, nil + } + + return 0, nil, false, false, nil }, - parser.EQLC: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - if lhs == rhs { - return lhs, nil, true, nil + parser.EQLC: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + if lhs == rhs { + return lhs, nil, true, true, nil + } + + return 0, nil, false, true, nil + } + + if hlhs != nil && hrhs != nil { + if hlhs.Equals(hrhs) { + return 0, hlhs.Copy(), true, true, nil + } + + return 0, nil, false, true, nil } - return 0, nil, false, nil + return 0, nil, false, false, nil }, - parser.NEQ: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - if lhs != rhs { - return lhs, nil, true, nil + parser.NEQ: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + if lhs != rhs { + return lhs, nil, true, true, nil + } + + return 0, nil, false, true, nil + } + + if hlhs != nil && hrhs != nil { + if !hlhs.Equals(hrhs) { + return 0, hlhs.Copy(), true, true, nil + } + + return 0, nil, false, true, nil } - return 0, nil, false, nil + return lhs, hlhs, false, false, nil }, - parser.LTE: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.LTE: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs <= rhs { - return lhs, nil, true, nil + return lhs, nil, true, true, nil } - return 0, nil, false, nil + return 0, nil, false, true, nil }, - parser.LSS: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.LSS: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs < rhs { - return lhs, nil, true, nil + return lhs, nil, true, true, nil } - return 0, nil, false, nil + return 0, nil, false, true, nil }, - parser.GTE: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.GTE: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs >= rhs { - return lhs, nil, true, nil + return lhs, nil, true, true, nil } - return 0, nil, false, nil + return 0, nil, false, true, nil }, - parser.GTR: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.GTR: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs > rhs { - return lhs, nil, true, nil + return lhs, nil, true, true, nil } - return 0, nil, false, nil + return 0, nil, false, true, nil }, } var boolComparisonOperationFuncs = 
map[parser.ItemType]binaryOperationFunc{ - parser.EQLC: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - if lhs == rhs { - return 1, nil, true, nil + parser.EQLC: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + if lhs == rhs { + return 1, nil, true, true, nil + } + + return 0, nil, true, true, nil } - return 0, nil, true, nil + if hlhs != nil && hrhs != nil { + if hlhs.Equals(hrhs) { + return 1, nil, true, true, nil + } + + return 0, nil, true, true, nil + } + + return 0, nil, false, false, nil }, - parser.NEQ: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - if lhs != rhs { - return 1, nil, true, nil + parser.NEQ: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs == nil && hrhs == nil { + if lhs != rhs { + return 1, nil, true, true, nil + } + + return 0, nil, true, true, nil + } + + if hlhs != nil && hrhs != nil { + if !hlhs.Equals(hrhs) { + return 1, nil, true, true, nil + } + + return 0, nil, true, true, nil } - return 0, nil, true, nil + return 0, nil, false, false, nil }, - parser.LTE: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.LTE: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs <= rhs { - return 1, nil, true, nil + return 1, nil, true, true, nil } - return 0, nil, true, nil + return 0, nil, true, true, nil }, - parser.LSS: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.LSS: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs < rhs { - return 1, nil, true, nil + return 1, nil, true, true, nil } - return 0, nil, true, nil + return 0, nil, true, true, nil }, - parser.GTE: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.GTE: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs >= rhs { - return 1, nil, true, nil + return 1, nil, true, true, nil } - return 0, nil, true, nil + return 0, nil, true, true, nil }, - parser.GTR: func(lhs, rhs float64, _, _ *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { + parser.GTR: func(lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, bool, error) { + if hlhs != nil || hrhs != nil { + return 0, nil, false, false, nil + } + if lhs > rhs { - return 1, nil, true, nil + return 1, nil, true, true, nil } - return 0, nil, true, nil + return 0, nil, true, true, nil }, } diff --git a/pkg/streamingpromql/operators/selectors/instant_vector_selector.go b/pkg/streamingpromql/operators/selectors/instant_vector_selector.go index f2ce31f29f7..ac43ce695b4 100644 --- a/pkg/streamingpromql/operators/selectors/instant_vector_selector.go +++ b/pkg/streamingpromql/operators/selectors/instant_vector_selector.go @@ -41,7 +41,7 @@ func (v 
*InstantVectorSelector) SeriesMetadata(ctx context.Context) ([]types.Ser func (v *InstantVectorSelector) NextSeries(ctx context.Context) (types.InstantVectorSeriesData, error) { if v.memoizedIterator == nil { - v.memoizedIterator = storage.NewMemoizedEmptyIterator(v.Selector.LookbackDelta.Milliseconds()) + v.memoizedIterator = storage.NewMemoizedEmptyIterator(v.Selector.LookbackDelta.Milliseconds() - 1) // -1 to exclude samples on the lower boundary of the range. } var err error @@ -103,7 +103,7 @@ func (v *InstantVectorSelector) NextSeries(ctx context.Context) (types.InstantVe if valueType == chunkenc.ValNone || t > ts { var ok bool t, f, h, ok = v.memoizedIterator.PeekPrev() - if !ok || t < ts-v.Selector.LookbackDelta.Milliseconds() { + if !ok || t <= ts-v.Selector.LookbackDelta.Milliseconds() { continue } if h != nil { diff --git a/pkg/streamingpromql/operators/selectors/range_vector_selector.go b/pkg/streamingpromql/operators/selectors/range_vector_selector.go index 0f4dd0e9a21..6bd1457e2db 100644 --- a/pkg/streamingpromql/operators/selectors/range_vector_selector.go +++ b/pkg/streamingpromql/operators/selectors/range_vector_selector.go @@ -90,8 +90,8 @@ func (m *RangeVectorSelector) NextStepSamples() (*types.RangeVectorStepData, err // Apply offset after adjusting for timestamp from @ modifier. rangeEnd = rangeEnd - m.Selector.Offset rangeStart := rangeEnd - m.rangeMilliseconds - m.floats.DiscardPointsBefore(rangeStart) - m.histograms.DiscardPointsBefore(rangeStart) + m.floats.DiscardPointsAtOrBefore(rangeStart) + m.histograms.DiscardPointsAtOrBefore(rangeStart) if err := m.fillBuffer(m.floats, m.histograms, rangeStart, rangeEnd); err != nil { return nil, err @@ -116,7 +116,7 @@ func (m *RangeVectorSelector) fillBuffer(floats *types.FPointRingBuffer, histogr return m.chunkIterator.Err() case chunkenc.ValFloat: t, f := m.chunkIterator.At() - if value.IsStaleNaN(f) || t < rangeStart { + if value.IsStaleNaN(f) || t <= rangeStart { // Range vectors ignore stale markers // https://github.com/prometheus/prometheus/issues/3746#issuecomment-361572859 continue @@ -134,7 +134,7 @@ func (m *RangeVectorSelector) fillBuffer(floats *types.FPointRingBuffer, histogr } case chunkenc.ValHistogram, chunkenc.ValFloatHistogram: t := m.chunkIterator.AtT() - if t < rangeStart { + if t <= rangeStart { continue } hPoint, _ := histograms.NextPoint() diff --git a/pkg/streamingpromql/operators/selectors/selector.go b/pkg/streamingpromql/operators/selectors/selector.go index 020de73c116..c75c1c37a12 100644 --- a/pkg/streamingpromql/operators/selectors/selector.go +++ b/pkg/streamingpromql/operators/selectors/selector.go @@ -57,7 +57,7 @@ func (s *Selector) SeriesMetadata(ctx context.Context) ([]types.SeriesMetadata, // Apply lookback delta, range and offset after adjusting for timestamp from @ modifier. rangeMilliseconds := s.Range.Milliseconds() - startTimestamp = startTimestamp - s.LookbackDelta.Milliseconds() - rangeMilliseconds - s.Offset + startTimestamp = startTimestamp - s.LookbackDelta.Milliseconds() - rangeMilliseconds - s.Offset - 1 // -1 to exclude samples on the lower boundary of the range. 
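To make the new selection boundaries concrete (a sketch under stated assumptions, not code from this patch): a sample at timestamp t is now usable for an evaluation at time ts only if it lies in the half-open interval (ts - lookbackDelta, ts], so a sample sitting exactly on the lower boundary is excluded. Range selectors apply the same left-open rule through DiscardPointsAtOrBefore and the t <= rangeStart checks above, which is also why test selectors such as [1m] are widened to [1m1s] later in this diff to keep matching the same number of samples. The helper below is hypothetical:

// Sketch only: the boundary rule the instant vector selector applies after this change.
// A sample at t is selectable for evaluation time ts when ts-lookbackDeltaMs < t <= ts.
func inLookbackWindow(t, ts, lookbackDeltaMs int64) bool {
	return t > ts-lookbackDeltaMs && t <= ts
}

For example, with ts at 10:00:00 and a 5m lookback delta, a sample stamped exactly 09:55:00.000 is no longer selected, while a sample at 09:55:00.001 still is.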
endTimestamp = endTimestamp - s.Offset hints := &storage.SelectHints{ diff --git a/pkg/streamingpromql/operators/subquery.go b/pkg/streamingpromql/operators/subquery.go index 6ea90f28534..716b8ab7757 100644 --- a/pkg/streamingpromql/operators/subquery.go +++ b/pkg/streamingpromql/operators/subquery.go @@ -91,8 +91,8 @@ func (s *Subquery) NextStepSamples() (*types.RangeVectorStepData, error) { // Apply offset after adjusting for timestamp from @ modifier. rangeEnd = rangeEnd - s.SubqueryOffset rangeStart := rangeEnd - s.rangeMilliseconds - s.floats.DiscardPointsBefore(rangeStart) - s.histograms.DiscardPointsBefore(rangeStart) + s.floats.DiscardPointsAtOrBefore(rangeStart) + s.histograms.DiscardPointsAtOrBefore(rangeStart) s.stepData.Floats = s.floats.ViewUntilSearchingForwards(rangeEnd, s.stepData.Floats) s.stepData.Histograms = s.histograms.ViewUntilSearchingForwards(rangeEnd, s.stepData.Histograms) diff --git a/pkg/streamingpromql/testdata/ours-only/aggregators.test b/pkg/streamingpromql/testdata/ours-only/aggregators.test deleted file mode 100644 index a07f21258f3..00000000000 --- a/pkg/streamingpromql/testdata/ours-only/aggregators.test +++ /dev/null @@ -1,27 +0,0 @@ -# This test can be moved back into `ours` once https://github.com/prometheus/prometheus/pull/14941 is merged. - -load 1m - series{label="a", group="a"} 1 2 3 4 5 -50 - series{label="b", group="a"} 9 8 7 -10 _ 2 - series{label="c", group="b"} 1 2 3 4 5 -50 - series{label="d", group="b"} 1 NaN _ 40 NaN 0 - series{label="e", group="c"} 1 2 3 4 5 -50 - series{label="f", group="c"} -inf 3 0 9 inf 10 - series{label="g", group="d"} NaN 3 0 9 NaN 10 - series{label="h", group="e"} -inf 3 0 9 inf 10 - -eval range from 0 to 6m step 1m stddev by (group) (series) - {group="a"} 4 3 2 7 7.5 26 26 - {group="b"} 0 NaN NaN 18 NaN 25 25 - {group="c"} NaN 0.5 1.5 2.5 NaN 30 30 - {group="d"} NaN 0 0 0 NaN 0 0 - {group="e"} NaN 0 0 0 NaN 0 0 - -eval range from 0 to 6m step 1m stdvar by (group) (series) - {group="a"} 16 9 4 49 56.25 676 676 - {group="b"} 0 NaN NaN 324 NaN 625 625 - {group="c"} NaN 0.25 2.25 6.25 NaN 900 900 - {group="d"} NaN 0 0 0 NaN 0 0 - {group="e"} NaN 0 0 0 NaN 0 0 - -clear diff --git a/pkg/streamingpromql/testdata/ours-only/binary_operators.test b/pkg/streamingpromql/testdata/ours-only/binary_operators.test deleted file mode 100644 index dc26d439c94..00000000000 --- a/pkg/streamingpromql/testdata/ours-only/binary_operators.test +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: AGPL-3.0-only - -load 6m - left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} - -# These cases currently fails in Prometheus' engine due to https://github.com/prometheus/prometheus/issues/13934#issuecomment-2373132091. -# Once the issue described there has been resolved, these test cases can be removed and the corresponding test cases in -# testdata/ours/binary_operators.test can be enabled. 
-eval range from 0 to 24m step 6m 0 == left_histograms - left_histograms 0 0 _ _ 0 - -eval range from 0 to 24m step 6m 3 != left_histograms - left_histograms 0 0 _ _ 0 - -eval range from 0 to 24m step 6m 3 > left_histograms - left_histograms 0 0 _ _ 0 - -eval range from 0 to 24m step 6m 3 >= left_histograms - left_histograms 0 0 _ _ 0 - -eval range from 0 to 24m step 6m 0 >= left_histograms - left_histograms 0 0 _ _ 0 diff --git a/pkg/streamingpromql/testdata/ours-only/native_histograms.test b/pkg/streamingpromql/testdata/ours-only/native_histograms.test deleted file mode 100644 index 7ca63e9560c..00000000000 --- a/pkg/streamingpromql/testdata/ours-only/native_histograms.test +++ /dev/null @@ -1,21 +0,0 @@ -# Test metric with mixed floats and histograms -load 1m - mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} - -# clamp ignores any histograms -# Prometheus currently returns min instead of no value as per the documentation -# https://github.com/prometheus/prometheus/pull/15169 -eval range from 0 to 5m step 1m clamp(mixed_metric, 2, 5) - {} _ 2 2 3 - -eval range from 0 to 5m step 1m clamp_min(mixed_metric, 2) - {} _ 2 2 3 - -eval range from 0 to 5m step 1m clamp_max(mixed_metric, 2) - {} _ 1 2 2 - -# round ignores any histograms -# Prometheus currently returns 0 instead of no value as per the documentation -# https://github.com/prometheus/prometheus/pull/15176 -eval range from 0 to 5m step 1m round(mixed_metric) - {} _ 1 2 3 diff --git a/pkg/streamingpromql/testdata/ours/aggregators.test b/pkg/streamingpromql/testdata/ours/aggregators.test index 206695c5eed..08975259b6b 100644 --- a/pkg/streamingpromql/testdata/ours/aggregators.test +++ b/pkg/streamingpromql/testdata/ours/aggregators.test @@ -250,17 +250,15 @@ load 1m series{label="value3"} -20 -9 -9 {{schema:1 sum:5 count:5 buckets:[1 3 1]}} {{schema:1 sum:5 count:5 buckets:[1 3 1]}} {{schema:1 sum:5 count:5 buckets:[1 3 1]}} histogram_only_series {{schema:1 sum:5 count:5 buckets:[1 3 1]}} {{schema:1 sum:5 count:5 buckets:[1 3 1]}} {{schema:1 sum:5 count:5 buckets:[1 3 1]}} -eval range from 0 to 5m step 1m max(series) - {} 0 -9 0 10 0 0 +eval_info range from 0 to 5m step 1m max(series) + {} 0 -9 -9 10 -10 _ -eval range from 0 to 5m step 1m min(series) - {} -20 -10 -10 0 -10 0 +eval_info range from 0 to 5m step 1m min(series) + {} -20 -10 -10 10 -10 _ -eval range from 0 to 2m step 1m max(histogram_only_series) - {} 0 0 0 +eval_info range from 0 to 2m step 1m max(histogram_only_series) -eval range from 0 to 2m step 1m min(histogram_only_series) - {} 0 0 0 +eval_info range from 0 to 2m step 1m min(histogram_only_series) clear @@ -273,14 +271,14 @@ load 1m mixed_series{type="histogram", label="value2"} NaN mixed_series{type="onlyNaN"} NaN -eval instant at 1m min by (type) (mixed_series) +eval_info instant at 1m min by (type) (mixed_series) {type="float"} -5 - {type="histogram"} 0 + {type="histogram"} NaN {type="onlyNaN"} NaN -eval instant at 1m max by (type) (mixed_series) +eval_info instant at 1m max by (type) (mixed_series) {type="float"} 10 - {type="histogram"} 0 + {type="histogram"} NaN {type="onlyNaN"} NaN clear @@ -303,8 +301,8 @@ load 5m native_histogram{instance="2"} {{schema:3 sum:4 count:4 buckets:[4]}} # Test range query with native histograms -eval range from 0m to 10m step 5m sum (native_histogram) - {} {{schema:3 count:11 sum:14 buckets:[5 6]}} {{schema:3 count:11 sum:12 buckets:[5 6]}} {{schema:5 count:17 sum:18 buckets:[1 5 1]}} 
+eval range from 0m to 10m step 5m sum(native_histogram) + {} {{schema:3 count:11 sum:14 buckets:[5 6]}} {{schema:5 sum:8 count:7 buckets:[1 5 1]}} {{schema:5 count:17 sum:18 buckets:[1 5 1]}} clear diff --git a/pkg/streamingpromql/testdata/ours/binary_operators.test b/pkg/streamingpromql/testdata/ours/binary_operators.test index f3806fef8c7..7a0bcb69d05 100644 --- a/pkg/streamingpromql/testdata/ours/binary_operators.test +++ b/pkg/streamingpromql/testdata/ours/binary_operators.test @@ -182,72 +182,59 @@ load 5m third_histogram{job="control"} {{schema:0 sum:15 count:5 buckets:[3 4 2]}} metric{job="test"} 2 -eval instant at 5m first_histogram + second_histogram +eval instant at 0 first_histogram + second_histogram {job="test"} {{schema:0 sum:15 count:10 buckets:[2 4 2]}} -eval instant at 5m second_histogram - first_histogram +eval instant at 0 second_histogram - first_histogram {job="test"} {{schema:0 sum:5 count:2 buckets:[0 0 0]}} -# Two histograms multiplied together result in a float 0 -eval instant at 5m first_histogram * second_histogram - {job="test"} 0 +# Cannot multiply two histograms +eval_info instant at 0 first_histogram * second_histogram # Cannot divide a histogram by a histogram -eval instant at 5m first_histogram / second_histogram - {job="test"} NaN +eval_info instant at 0 first_histogram / second_histogram # Histogram multiplied by float -eval instant at 5m first_histogram * metric +eval instant at 0 first_histogram * metric {job="test"} {{schema:0 count:8 sum:10 buckets:[2 4 2]}} # Works in either order -eval instant at 5m metric * first_histogram +eval instant at 0 metric * first_histogram {job="test"} {{schema:0 count:8 sum:10 buckets:[2 4 2]}} # Histogram divide by float -eval instant at 5m first_histogram / metric +eval instant at 0 first_histogram / metric {job="test"} {{schema:0 count:2 sum:2.5 buckets:[0.5 1 0.5]}} -# Float divided by histogram is equivalent to "float / 0" -eval instant at 5m metric / first_histogram - {job="test"} +Inf +# Cannot divide a float by a histogram +eval_info instant at 0 metric / first_histogram -# Histogram + float is equivalent to "0 + float" -eval instant at 5m first_histogram + metric - {job="test"} 2 +# Cannot add a float to a histogram +eval_info instant at 0 first_histogram + metric -# Histogram - float is equivalent to "0 - float" -eval instant at 5m first_histogram - metric - {job="test"} -2 +# Cannot subtract a float from a histogram +eval_info instant at 0 first_histogram - metric -# Histogram atan2 float is equivalent to "0 atan2 float" -eval instant at 5m first_histogram atan2 metric - {job="test"} 0 +# Cannot perform atan2 on a histogram. +eval_info instant at 0 first_histogram atan2 metric -# Histogram to the power of a histogram is equivalent to "0 ^ 0" -eval instant at 5m first_histogram ^ second_histogram - {job="test"} 1 +# Cannot apply ^ to a histogram. +eval_info instant at 0 first_histogram ^ second_histogram -# Histogram to power of a float is equivalent to "0 ^ float" -eval instant at 5m first_histogram ^ metric - {job="test"} 0 +# Cannot apply ^ to a histogram. +eval_info instant at 0 first_histogram ^ metric -# Float to power of histogram is equivalent to "float ^ 0" -eval instant at 5m metric ^ first_histogram - {job="test"} 1 +# Cannot apply ^ to a histogram. +eval_info instant at 0 metric ^ first_histogram -# Histogram mod histogram is equivalent to "0 % 0" -eval instant at 5m first_histogram % second_histogram - {job="test"} NaN +# Cannot apply % to a histogram. 
+eval_info instant at 0 first_histogram % second_histogram -# Histogram mod float is equivalent to "0 % float" -eval instant at 5m first_histogram % metric - {job="test"} 0 - -# Float mod histogram is equivalent to "float % 0" -eval instant at 5m metric % first_histogram - {job="test"} NaN +# Cannot apply % to a histogram. +eval_info instant at 0 first_histogram % metric +# Cannot apply % to a histogram. +eval_info instant at 0 metric % first_histogram clear @@ -260,11 +247,11 @@ load 5m second_histogram{env="dev",pod="pod-abc123"} {{schema:0 sum:10 count:5 buckets:[2 3 2]}} second_histogram{env="dev",pod="pod-mno789"} {{schema:0 sum:5 count:4 buckets:[1 1 1]}} -eval instant at 5m first_histogram + second_histogram +eval instant at 0 first_histogram + second_histogram {env="prod",pod="pod-abc123"} {{schema:0 sum:10 count:8 buckets:[2 4 2]}} {env="dev",pod="pod-abc123"} {{schema:0 sum:20 count:10 buckets:[4 6 4]}} -eval instant at 5m second_histogram - first_histogram +eval instant at 0 second_histogram - first_histogram {env="prod",pod="pod-abc123"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}} {env="dev",pod="pod-abc123"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}} @@ -284,22 +271,22 @@ clear # These tests are affected by https://github.com/prometheus/prometheus/issues/14172 # whereby the histograms followed by a float are ignored and the lookback float value is used. # For example, in "another_mixed", at T=5m it has the lookback value of 10, and at T=5min the lookback value is _. -load 5m - mixed_metric{job="test"} 10 1 2 3 {{schema:0 sum:6 count:3 buckets:[1 2 3]}} {{schema:0 sum:12 count:6 buckets:[2 4 6]}} - another_mixed{job="test"} 10 {{schema:0 sum:12 count:6 buckets:[2 4 6]}} {{schema:0 sum:12 count:6 buckets:[2 4 6]}} 4 5 {{schema:0 sum:12 count:6 buckets:[2 4 6]}} -# @0 @5m @10m @15m @20m @25m +load 1m + mixed_metric{job="test"} 10 1 2 3 {{schema:0 sum:6 count:3 buckets:[1 2 3]}} {{schema:0 sum:12 count:6 buckets:[2 4 6]}} + another_mixed{job="test"} 10 {{schema:0 sum:12 count:6 buckets:[2 4 6]}} {{schema:0 sum:12 count:6 buckets:[2 4 6]}} 4 5 {{schema:0 sum:12 count:6 buckets:[2 4 6]}} +# @0 @5m @10m @15m @20m @25m -eval range from 0 to 25m step 5m mixed_metric + another_mixed - {job="test"} 20 11 _ 7 5 {{schema:0 sum:24 count:12 buckets:[4 8 12]}} +eval range from 0 to 5m step 1m mixed_metric + another_mixed + {job="test"} 20 11 12 7 _ {{schema:0 sum:24 count:12 buckets:[4 8 12]}} -eval range from 0 to 25m step 5m mixed_metric - another_mixed - {job="test"} 0 -9 _ -1 -5 {{schema:0 sum:0 count:0}} +eval range from 0 to 5m step 1m mixed_metric - another_mixed + {job="test"} 0 -9 -8 -1 _ {{schema:0 sum:0 count:0}} -eval range from 0 to 25m step 5m mixed_metric * another_mixed - {job="test"} 100 10 _ 12 {{schema:0 sum:30 count:15 buckets:[5 10 15]}} 0 +eval range from 0 to 5m step 1m mixed_metric * another_mixed + {job="test"} 100 10 20 12 {{schema:0 sum:30 count:15 buckets:[5 10 15]}} _ -eval range from 0 to 25m step 5m mixed_metric / another_mixed - {job="test"} 1 0.1 _ 0.75 {{schema:0 sum:1.2 count:0.6 buckets:[0.2 0.4 0.6]}} NAN +eval range from 0 to 5m step 1m mixed_metric / another_mixed + {job="test"} 1 0.1 0.2 0.75 {{schema:0 sum:1.2 count:0.6 buckets:[0.2 0.4 0.6]}} _ clear @@ -329,23 +316,20 @@ eval range from 0m to 24m step 6m my_metric / 2 {job="bar"} 5 10 15 _ 25 # Scalar on left side -# Note that positive scalar / histogram == Inf. +# Note that positive scalar / histogram == nothing. 
eval range from 0m to 24m step 6m 2 / my_metric - {histograms="only"} Inf Inf - {histograms="both"} 0.4 0.2 +Inf + {histograms="both"} 0.4 0.2 _ {job="foo"} 2 1 0.6666666666666667 0.5 0.4 {job="bar"} 0.2 0.1 0.06666666666666667 _ 0.04 # Test other arithmetic operations. eval range from 0m to 24m step 6m my_metric + 2 - {histograms="only"} 2 2 - {histograms="both"} 7 12 2 + {histograms="both"} 7 12 _ {job="foo"} 3 4 5 6 7 {job="bar"} 12 22 32 _ 52 eval range from 0m to 24m step 6m my_metric - 2 - {histograms="only"} -2 -2 - {histograms="both"} 3 8 -2 + {histograms="both"} 3 8 {job="foo"} -1 0 1 2 3 {job="bar"} 8 18 28 _ 48 @@ -355,15 +339,13 @@ eval range from 0m to 24m step 6m my_metric * 2 {job="foo"} 2 4 6 8 10 {job="bar"} 20 40 60 _ 100 -eval range from 0m to 24m step 6m my_metric ^ 2 - {histograms="only"} 0 0 - {histograms="both"} 25 100 0 +eval_info range from 0m to 24m step 6m my_metric ^ 2 + {histograms="both"} 25 100 _ {job="foo"} 1 4 9 16 25 {job="bar"} 100 400 900 _ 2500 -eval range from 0m to 24m step 6m my_metric % 2 - {histograms="only"} 0 0 - {histograms="both"} 1 0 0 +eval_info range from 0m to 24m step 6m my_metric % 2 + {histograms="both"} 1 0 _ {job="foo"} 1 0 1 0 1 {job="bar"} 0 0 0 _ 0 @@ -408,7 +390,6 @@ eval range from 0m to 42m step 6m scalar(metric) + 2 clear # Comparison operations -# Note that native histograms are currently treated as 0s for comparison operations. load 6m left_floats 1 2 _ _ 3 stale 4 5 NaN Inf -Inf right_floats 4 _ _ 5 3 7 -1 20 NaN Inf -Inf @@ -427,16 +408,16 @@ eval range from 0 to 60m step 6m left_floats == does_not_match # No results. eval range from 0 to 24m step 6m left_histograms == right_histograms - left_histograms 0 0 _ _ _ + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _ eval range from 0 to 24m step 6m left_histograms == bool right_histograms - {} 1 1 _ _ _ + {} 1 0 _ _ _ -eval range from 0 to 24m step 6m left_histograms == right_floats_for_histograms - left_histograms 0 _ _ _ _ +eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms - {} 1 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms + # No results. eval range from 0 to 60m step 6m left_floats != right_floats left_floats 1 _ _ _ _ _ 4 5 NaN _ _ @@ -445,16 +426,16 @@ eval range from 0 to 60m step 6m left_floats != bool right_floats {} 1 _ _ _ 0 _ 1 1 1 0 0 eval range from 0 to 24m step 6m left_histograms != right_histograms - # No results. + left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _ eval range from 0 to 24m step 6m left_histograms != bool right_histograms - {} 0 0 _ _ _ + {} 0 1 _ _ _ -eval range from 0 to 24m step 6m left_histograms != right_floats_for_histograms - left_histograms _ 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms - {} 0 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms + # No results. 
eval range from 0 to 60m step 6m left_floats > right_floats left_floats _ _ _ _ _ _ 4 _ _ _ _ @@ -462,17 +443,17 @@ eval range from 0 to 60m step 6m left_floats > right_floats eval range from 0 to 60m step 6m left_floats > bool right_floats {} 0 _ _ _ 0 _ 1 0 0 0 0 -eval range from 0 to 24m step 6m left_histograms > right_histograms +eval_info range from 0 to 24m step 6m left_histograms > right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms > bool right_histograms - {} 0 0 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms > right_floats_for_histograms - left_histograms _ 0 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms - {} 0 1 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms + # No results. eval range from 0 to 60m step 6m left_floats >= right_floats left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf @@ -480,17 +461,17 @@ eval range from 0 to 60m step 6m left_floats >= right_floats eval range from 0 to 60m step 6m left_floats >= bool right_floats {} 0 _ _ _ 1 _ 1 0 0 1 1 -eval range from 0 to 24m step 6m left_histograms >= right_histograms - left_histograms 0 0 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms >= right_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms >= bool right_histograms - {} 1 1 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms - left_histograms 0 0 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms - {} 1 1 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms + # No results. eval range from 0 to 60m step 6m left_floats < right_floats left_floats 1 _ _ _ _ _ _ 5 _ _ _ @@ -498,17 +479,17 @@ eval range from 0 to 60m step 6m left_floats < right_floats eval range from 0 to 60m step 6m left_floats < bool right_floats {} 1 _ _ _ 0 _ 0 1 0 0 0 -eval range from 0 to 24m step 6m left_histograms < right_histograms +eval_info range from 0 to 24m step 6m left_histograms < right_histograms # No results. -eval range from 0 to 24m step 6m left_histograms < bool right_histograms - {} 0 0 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms < right_floats_for_histograms - left_histograms _ _ _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms - {} 0 0 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms + # No results. 
eval range from 0 to 60m step 6m left_floats <= right_floats left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf @@ -516,17 +497,17 @@ eval range from 0 to 60m step 6m left_floats <= right_floats eval range from 0 to 60m step 6m left_floats <= bool right_floats {} 1 _ _ _ 1 _ 0 1 0 1 1 -eval range from 0 to 24m step 6m left_histograms <= right_histograms - left_histograms 0 0 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms <= right_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms <= bool right_histograms - {} 1 1 _ _ _ +eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms - left_histograms 0 _ _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms + # No results. -eval range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms - {} 1 0 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms + # No results. # Vector / scalar combinations with scalar on right side eval range from 0 to 60m step 6m left_floats == 3 @@ -562,77 +543,77 @@ eval range from 0 to 60m step 6m left_floats == NaN eval range from 0 to 60m step 6m left_floats == bool NaN {} 0 0 _ _ 0 _ 0 0 0 0 0 -eval range from 0 to 24m step 6m left_histograms == 3 +eval_info range from 0 to 24m step 6m left_histograms == 3 # No results. -eval range from 0 to 24m step 6m left_histograms == 0 - left_histograms 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms == 0 + # No results. -eval range from 0 to 24m step 6m left_histograms != 3 - left_histograms 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms != 3 + # No results. eval range from 0 to 24m step 6m left_histograms != 0 # No results. -eval range from 0 to 24m step 6m left_histograms > 3 +eval_info range from 0 to 24m step 6m left_histograms > 3 # No results. -eval range from 0 to 24m step 6m left_histograms > 0 +eval_info range from 0 to 24m step 6m left_histograms > 0 # No results. eval range from 0 to 24m step 6m left_histograms >= 3 # No results. -eval range from 0 to 24m step 6m left_histograms >= 0 - left_histograms 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms >= 0 + # No results. -eval range from 0 to 24m step 6m left_histograms < 3 - left_histograms 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms < 3 + # No results. -eval range from 0 to 24m step 6m left_histograms < 0 +eval_info range from 0 to 24m step 6m left_histograms < 0 # No results. -eval range from 0 to 24m step 6m left_histograms <= 3 - left_histograms 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms <= 3 + # No results. eval range from 0 to 24m step 6m left_histograms <= 0 - left_histograms 0 0 _ _ 0 + # No results. -eval range from 0 to 24m step 6m left_histograms == bool 3 - {} 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms == bool 3 + # No results. -eval range from 0 to 24m step 6m left_histograms == bool 0 - {} 1 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms == bool 0 + # No results. -eval range from 0 to 24m step 6m left_histograms != bool 3 - {} 1 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms != bool 3 + # No results. -eval range from 0 to 24m step 6m left_histograms != bool 0 - {} 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms != bool 0 + # No results. 
-eval range from 0 to 24m step 6m left_histograms > bool 3 - {} 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms > bool 3 + # No results. -eval range from 0 to 24m step 6m left_histograms > bool 0 - {} 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms > bool 0 + # No results. -eval range from 0 to 24m step 6m left_histograms >= bool 3 - {} 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms >= bool 3 + # No results. -eval range from 0 to 24m step 6m left_histograms >= bool 0 - {} 1 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms >= bool 0 + # No results. -eval range from 0 to 24m step 6m left_histograms < bool 3 - {} 1 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms < bool 3 + # No results. -eval range from 0 to 24m step 6m left_histograms < bool 0 - {} 0 0 _ _ 0 +eval_info range from 0 to 24m step 6m left_histograms < bool 0 + # No results. -eval range from 0 to 24m step 6m left_histograms <= bool 3 - {} 1 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms <= bool 3 + # No results. -eval range from 0 to 24m step 6m left_histograms <= bool 0 - {} 1 1 _ _ 1 +eval_info range from 0 to 24m step 6m left_histograms <= bool 0 + # No results. # Vector / scalar combinations with scalar on left side eval range from 0 to 60m step 6m 3 == left_floats @@ -671,17 +652,11 @@ eval range from 0 to 60m step 6m NaN == bool left_floats eval range from 0 to 24m step 6m 3 == left_histograms # No results. -# This case currently fails in Prometheus' engine due to https://github.com/prometheus/prometheus/issues/13934#issuecomment-2373132091. -# It has been moved to testdata/ours-only/binary_operators.test until the issue described there -# has been resolved. -# eval range from 0 to 24m step 6m 0 == left_histograms -# left_histograms 0 0 _ _ 0 +eval range from 0 to 24m step 6m 0 == left_histograms + # No results. -# This case currently fails in Prometheus' engine due to https://github.com/prometheus/prometheus/issues/13934#issuecomment-2373132091. -# It has been moved to testdata/ours-only/binary_operators.test until the issue described there -# has been resolved. -# eval range from 0 to 24m step 6m 3 != left_histograms -# left_histograms 0 0 _ _ 0 +eval range from 0 to 24m step 6m 3 != left_histograms + # No results. eval range from 0 to 24m step 6m 0 != left_histograms # No results. @@ -698,23 +673,17 @@ eval range from 0 to 24m step 6m 3 < left_histograms eval range from 0 to 24m step 6m 0 < left_histograms # No results. -# This case currently fails in Prometheus' engine due to https://github.com/prometheus/prometheus/issues/13934#issuecomment-2373132091. -# It has been moved to testdata/ours-only/binary_operators.test until the issue described there has been resolved. -# eval range from 0 to 24m step 6m 3 > left_histograms -# left_histograms 0 0 _ _ 0 +eval range from 0 to 24m step 6m 3 > left_histograms + # No results. eval range from 0 to 24m step 6m 0 > left_histograms # No results. -# This case currently fails in Prometheus' engine due to https://github.com/prometheus/prometheus/issues/13934#issuecomment-2373132091. -# It has been moved to testdata/ours-only/binary_operators.test until the issue described there has been resolved. -# eval range from 0 to 24m step 6m 3 >= left_histograms -# left_histograms 0 0 _ _ 0 +eval range from 0 to 24m step 6m 3 >= left_histograms + # No results. -# This case currently fails in Prometheus' engine due to https://github.com/prometheus/prometheus/issues/13934#issuecomment-2373132091. 
-# It has been moved to testdata/ours-only/binary_operators.test until the issue described there has been resolved. -# eval range from 0 to 24m step 6m 0 >= left_histograms -# left_histograms 0 0 _ _ 0 +eval range from 0 to 24m step 6m 0 >= left_histograms + # No results. # Scalar / scalar combinations eval range from 0 to 60m step 6m scalar(left_floats) == bool 3 diff --git a/pkg/streamingpromql/testdata/ours/functions.test b/pkg/streamingpromql/testdata/ours/functions.test index a1f43e617a3..f05c3cd3b7a 100644 --- a/pkg/streamingpromql/testdata/ours/functions.test +++ b/pkg/streamingpromql/testdata/ours/functions.test @@ -12,18 +12,18 @@ load 1m some_metric_with_stale_marker 0 60 120 stale 240 300 # Range query with rate. -eval range from 0 to 4m step 1m rate(some_metric[1m]) - {env="prod", cluster="eu"} _ 1 1 1 1 - {env="prod", cluster="us"} _ 2 2 2 2 - {env="test", cluster="eu"} _ 3 3 3 3 - {env="test", cluster="us"} _ 4 4 4 4 +eval range from 0 to 4m step 1m rate(some_metric[1m1s]) + {env="prod", cluster="eu"} _ 0.9836065573770493 1 1 1 + {env="prod", cluster="us"} _ 1.9672131147540985 2 2 2 + {env="test", cluster="eu"} _ 2.9508196721311477 3 3 3 + {env="test", cluster="us"} _ 3.934426229508197 4 4 4 # Range query with increase. -eval range from 0 to 4m step 1m increase(some_metric[1m]) - {env="prod", cluster="eu"} _ 60 60 60 60 - {env="prod", cluster="us"} _ 120 120 120 120 - {env="test", cluster="eu"} _ 180 180 180 180 - {env="test", cluster="us"} _ 240 240 240 240 +eval range from 0 to 4m step 1m increase(some_metric[1m1s]) + {env="prod", cluster="eu"} _ 60 61 61 61 + {env="prod", cluster="us"} _ 120 122 122 122 + {env="test", cluster="eu"} _ 180 183 183 183 + {env="test", cluster="us"} _ 240 244 244 244 # If no series are matched, we shouldn't return any results. eval range from 0 to 4m step 1m rate(some_nonexistent_metric[1m]) @@ -40,24 +40,24 @@ eval range from 0 to 4m step 1m increase(some_nonexistent_metric[1m]) # # The first query below (with 1m) tests that we correctly skip evaluating rate() when there aren't enough points in the range. # The second query below (with 2m) tests that we correctly pick the last point from the buffer if the last point in the buffer is outside the range. -eval range from 0 to 8m step 1m rate(some_metric_with_gaps[1m]) - {} _ 1 1 1 1 _ _ 2 2 +eval range from 0 to 8m step 1m rate(some_metric_with_gaps[1m1s]) + {} _ 0.9836065573770493 1 1 1 _ _ 2 2 -eval range from 0 to 8m step 1m increase(some_metric_with_gaps[1m]) - {} _ 60 60 60 60 _ _ 120 120 +eval range from 0 to 8m step 1m increase(some_metric_with_gaps[1m1s]) + {} _ 60 61 61 61 _ _ 122 122 -eval range from 0 to 8m step 1m rate(some_metric_with_gaps[2m]) - {} _ 0.5 1 1 1 1 14.666666666666666 2 2 +eval range from 0 to 8m step 1m rate(some_metric_with_gaps[2m1s]) + {} _ 0.49586776859504134 0.9917355371900827 1 1 1 14.666666666666666 2 2 -eval range from 0 to 8m step 1m increase(some_metric_with_gaps[2m]) - {} _ 60 120 120 120 120 1760 240 240 +eval range from 0 to 8m step 1m increase(some_metric_with_gaps[2m1s]) + {} _ 60 120 121 121 121 1774.6666666666665 242 242 # Test that we handle staleness markers correctly. 
-eval range from 0 to 5m step 1m rate(some_metric_with_stale_marker[2m]) - {} _ 0.5 1 1 1 1 +eval range from 0 to 5m step 1m rate(some_metric_with_stale_marker[2m1s]) + {} _ 0.49586776859504134 0.9917355371900827 1 1 1 -eval range from 0 to 5m step 1m increase(some_metric_with_stale_marker[2m]) - {} _ 60 120 120 120 120 +eval range from 0 to 5m step 1m increase(some_metric_with_stale_marker[2m1s]) + {} _ 60 120 121 121 121 clear @@ -90,76 +90,76 @@ load 1m some_nhcb_metric{baz="bar"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} some_inf_and_nan_metric{foo="baz"} 0 1 2 3 Inf Inf Inf NaN NaN NaN NaN 8 7 6 -eval range from 0 to 7m step 1m count_over_time(some_metric[3m]) +eval range from 0 to 7m step 1m count_over_time(some_metric[3m1s]) {foo="bar"} 1 2 3 4 3 2 2 2 -eval range from 0 to 7m step 1m count_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m count_over_time(some_metric[6s]) {foo="bar"} 1 1 1 1 _ _ 1 1 -eval range from 0 to 7m step 1m last_over_time(some_metric[3m]) +eval range from 0 to 7m step 1m last_over_time(some_metric[3m1s]) some_metric{foo="bar"} 0 1 2 3 3 3 {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m last_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m last_over_time(some_metric[6s]) some_metric{foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m present_over_time(some_metric[3m]) +eval range from 0 to 7m step 1m present_over_time(some_metric[3m1s]) {foo="bar"} 1 1 1 1 1 1 1 1 -eval range from 0 to 7m step 1m present_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m present_over_time(some_metric[6s]) {foo="bar"} 1 1 1 1 _ _ 1 1 -eval range from 0 to 7m step 1m min_over_time(some_metric[3m]) +eval range from 0 to 7m step 1m min_over_time(some_metric[3m1s]) {foo="bar"} 0 0 0 0 1 2 3 _ -eval range from 0 to 7m step 1m min_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m min_over_time(some_metric[6s]) {foo="bar"} 0 1 2 3 _ _ _ _ -eval range from 0 to 16m step 1m min_over_time(some_inf_and_nan_metric[3m]) +eval range from 0 to 16m step 1m min_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 0 0 0 1 2 3 Inf Inf Inf NaN 8 7 6 6 6 6 -eval range from 0 to 7m step 1m max_over_time(some_metric[3m]) +eval range from 0 to 7m step 1m max_over_time(some_metric[3m1s]) {foo="bar"} 0 1 2 3 3 3 3 _ -eval range from 0 to 7m step 1m max_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m max_over_time(some_metric[6s]) {foo="bar"} 0 1 2 3 _ _ _ _ -eval range from 0 to 16m step 1m max_over_time(some_inf_and_nan_metric[3m]) +eval range from 0 to 16m step 1m max_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 1 2 3 Inf Inf Inf Inf Inf Inf NaN 8 8 8 8 7 6 -eval_warn range from 0 to 10m step 1m sum_over_time(some_metric[3m]) +eval_warn range from 0 to 10m step 1m sum_over_time(some_metric[3m1s]) {foo="bar"} 0 1 3 6 6 5 _ {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 5m step 1m sum_over_time(some_metric[3m]) +eval range from 0 to 5m step 1m sum_over_time(some_metric[3m1s]) {foo="bar"} 0 1 3 6 6 5 -eval range from 7m to 10m step 1m sum_over_time(some_metric[3m]) +eval range from 7m to 
10m step 1m sum_over_time(some_metric[3m1s]) {foo="bar"} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:9 count:7 buckets:[3 7 5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m sum_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m sum_over_time(some_metric[6s]) {foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 2m step 1m sum_over_time(some_nhcb_metric[3m]) +eval range from 0 to 2m step 1m sum_over_time(some_nhcb_metric[3m1s]) {baz="bar"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:16 count:7 custom_values:[5 10] buckets:[1 6]}} {{schema:-53 sum:19 count:22 custom_values:[5 10] buckets:[8 14]}} -eval range from 0 to 16m step 1m sum_over_time(some_inf_and_nan_metric[3m]) +eval range from 0 to 16m step 1m sum_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 1 3 6 Inf Inf Inf NaN NaN NaN NaN NaN NaN NaN 21 13 6 -eval_warn range from 0 to 10m step 1m avg_over_time(some_metric[3m]) +eval_warn range from 0 to 10m step 1m avg_over_time(some_metric[3m1s]) {foo="bar"} 0 0.5 1 1.5 2 2.5 _ {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 5m step 1m avg_over_time(some_metric[3m]) +eval range from 0 to 5m step 1m avg_over_time(some_metric[3m1s]) {foo="bar"} 0 0.5 1 1.5 2 2.5 -eval range from 7m to 10m step 1m avg_over_time(some_metric[3m]) +eval range from 7m to 10m step 1m avg_over_time(some_metric[3m1s]) {foo="bar"} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:4.5 count:3.5 buckets:[1.5 3.5 2.5]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 7m step 1m avg_over_time(some_metric[5s]) +eval range from 0 to 7m step 1m avg_over_time(some_metric[6s]) {foo="bar"} 0 1 2 3 _ _ {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:5 count:3 buckets:[2 5 4]}} -eval range from 0 to 2m step 1m avg_over_time(some_nhcb_metric[3m]) +eval range from 0 to 2m step 1m avg_over_time(some_nhcb_metric[3m1s]) {baz="bar"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:8 count:3.5 custom_values:[5 10] buckets:[0.5 3]}} {{schema:-53 sum:6.333333333333334 count:7.333333333333333 custom_values:[5 10] buckets:[2.666666666666667 4.666666666666666]}} -eval range from 0 to 16m step 1m avg_over_time(some_inf_and_nan_metric[3m]) +eval range from 0 to 16m step 1m avg_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 0.5 1 1.5 Inf Inf Inf NaN NaN NaN NaN NaN NaN NaN 7 6.5 6 clear @@ -292,13 +292,13 @@ load 5m another_metric{env="1"} 60 120 180 # Allows relabeling (to-be-dropped) __name__ via label_replace -eval_fail instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") +eval_fail instant at 15m label_replace(rate({env="1"}[15m]), "my_name", "rate_$1", "__name__", "(.+)") expected_fail_message vector cannot contain metrics with the same labelset # {my_name="rate_metric", env="1"} 0.2 # {my_name="rate_another_metric", env="1"} 0.2 # Allows preserving __name__ via label_replace -eval_fail instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") +eval_fail instant at 15m label_replace(rate({env="1"}[15m]), "__name__", "rate_$1", "__name__", 
"(.+)") expected_fail_message vector cannot contain metrics with the same labelset # rate_metric{env="1"} 0.2 # rate_another_metric{env="1"} 0.2 @@ -417,15 +417,26 @@ load 5m mins 0 10 0 30 0 -10 0 5 NaN 0 NaN NaN maxes 10 20 5 0 -10 -5 40 50 10 NaN NaN NaN series -10 0 10 20 30 40 50 NaN 60 70 80 NaN + mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} eval range from 0 to 60m step 5m clamp(series, scalar(mins), scalar(maxes)) - {} 0 10 5 _ _ -5 40 NaN NaN NaN NaN NaN NaN + {} 0 10 5 _ _ -5 40 NaN NaN NaN NaN NaN _ eval range from 0 to 60m step 5m clamp_min(series, scalar(mins)) - {} 0 10 10 30 30 40 50 NaN NaN 70 NaN NaN NaN + {} 0 10 10 30 30 40 50 NaN NaN 70 NaN NaN _ eval range from 0 to 60m step 5m clamp_max(series, scalar(maxes)) - {} -10 0 5 0 -10 -5 40 NaN 10 NaN NaN NaN NaN + {} -10 0 5 0 -10 -5 40 NaN 10 NaN NaN NaN _ + +# clamp ignores any histograms +eval range from 0 to 15m step 5m clamp(mixed_metric, 2, 5) + {} _ 2 2 3 + +eval range from 0 to 15m step 5m clamp_min(mixed_metric, 2) + {} _ 2 2 3 + +eval range from 0 to 15m step 5m clamp_max(mixed_metric, 2) + {} _ 1 2 2 clear @@ -448,10 +459,15 @@ clear load 6m toNearest 1 _ 3 4 0.1 series{a="b"} -5.5 2.75 _ _ 6.87 + mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 eval range from 0 to 24m step 6m round(series, scalar(toNearest)) {a="b"} -5 NaN _ _ 6.9 +# round ignores any histograms +eval range from 0 to 18m step 6m round(mixed_metric) + {} _ 1 2 3 + clear load 6m @@ -484,7 +500,7 @@ load 1m metric{case="nhcb, some resets"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[0 0]}} metric{case="floats, nh and nhcb, some resets"} 0 1 2 3 2 1 0 _ {{schema:3 sum:0 count:2 buckets:[1 2 1]}} {{schema:3 sum:0 count:1 buckets:[1 2 1]}} {{schema:3 sum:0 count:1 buckets:[1 0 1]}} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[0 0]}} -eval range from 0 to 20m step 1m resets(metric[3m]) +eval range from 0 to 20m step 1m resets(metric[3m1s]) {case="all same floats, no resets value=0"} 0 0 0 0 0 0 0 0 0 0 0 {case="all same floats, no resets value=3"} 0 0 0 0 0 0 0 0 0 0 0 {case="all floats, no resets"} 0 0 0 0 0 0 0 0 0 0 @@ -525,7 +541,7 @@ load 1m metric{case="nhcb"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[0 0]}} metric{case="floats, nh and nhcb"} 0 1 2 3 2 1 0 _ {{schema:3 sum:0 count:2 buckets:[1 2 1]}} {{schema:3 sum:0 count:1 buckets:[1 2 1]}} {{schema:3 sum:0 count:1 buckets:[1 0 1]}} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[0 0]}} -eval range from 0 to 20m step 1m deriv(metric[3m]) +eval range from 0 to 20m step 1m deriv(metric[3m1s]) {case="all Inf"} _ NaN NaN NaN NaN NaN NaN NaN NaN NaN {case="all NaN"} _ NaN 
NaN NaN NaN NaN NaN NaN NaN NaN {case="all floats 1"} _ 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 diff --git a/pkg/streamingpromql/testdata/ours/native_histograms.test b/pkg/streamingpromql/testdata/ours/native_histograms.test index 614dbfe1477..d9d762d154f 100644 --- a/pkg/streamingpromql/testdata/ours/native_histograms.test +++ b/pkg/streamingpromql/testdata/ours/native_histograms.test @@ -56,17 +56,17 @@ load 5m route{path="two"} {{schema:0 sum:10 count:20 buckets:[9 10 1]}} route{path="three"} {{schema:0 sum:12 count:10 buckets:[3 2 5]}} -eval instant at 5m route +eval instant at 0 route route{path="one"} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} route{path="two"} {{schema:0 sum:10 count:20 buckets:[9 10 1]}} route{path="three"} {{schema:0 sum:12 count:10 buckets:[3 2 5]}} -eval instant at 5m histogram_count(route) +eval instant at 0 histogram_count(route) {path="one"} 4 {path="two"} 20 {path="three"} 10 -eval instant at 5m histogram_sum(route) +eval instant at 0 histogram_sum(route) {path="one"} 5 {path="two"} 10 {path="three"} 12 @@ -135,11 +135,11 @@ load 1m # - The second value is a rate/increase from two floats # - The third value is a rate/increase across a float and histogram (so no value returned) # - The remaining values contain the rate/increase across two histograms in the vector -eval_warn range from 0 to 4m step 1m rate(incr_histogram[1m]) +eval_warn range from 0 to 4m step 1m rate(incr_histogram[1m1s]) {} _ 0.016666666666666666 _ {{schema:3 count:0.016666666666666666 sum:0.03333333333333333 offset:1 buckets:[0.016666666666666666]}} {{schema:3 count:0.016666666666666666 sum:0.03333333333333333 offset:1 buckets:[0.016666666666666666]}} -eval_warn range from 0 to 4m step 1m increase(incr_histogram[1m]) - {} _ 1 _ {{schema:3 count:1 sum:2 offset:1 buckets:[1]}} {{schema:3 count:1 sum:2 offset:1 buckets:[1]}} +eval_warn range from 0 to 4m step 1m increase(incr_histogram[1m1s]) + {} _ 1.0166666666666666 _ {{schema:3 count:1.0166666666666666 sum:2.033333333333333 offset:1 buckets:[1.0166666666666666]}} {{schema:3 count:1.0166666666666666 sum:2.033333333333333 offset:1 buckets:[1.0166666666666666]}} clear @@ -164,11 +164,11 @@ clear load 1m metric {{sum:3 count:4 buckets:[1 2 1] counter_reset_hint:reset}} {{sum:63 count:124 buckets:[100 4 20] counter_reset_hint:not_reset}} -eval instant at 1m rate(metric[1m]) +eval instant at 1m rate(metric[1m1s]) {} {{sum:1 count:2 buckets:[1.65 0.03333333333333333 0.31666666666666665] counter_reset_hint:gauge}} -eval instant at 1m increase(metric[1m]) - {} {{sum:60 count:120 buckets:[99 2 19] counter_reset_hint:gauge}} +eval instant at 1m increase(metric[1m1s]) + {} {{count:122 sum:61 buckets:[100.64999999999999 2.033333333333333 19.316666666666666]}} clear @@ -208,8 +208,8 @@ clear load 1m metric {{schema:4 sum:2 count:2 buckets:[2]}} {{schema:2 sum:6 count:12 buckets:[12]}} {{schema:3 sum:12 count:24 buckets:[24]}} {{schema:4 sum:164 count:326 buckets:[326]}} -eval instant at 3m rate(metric[3m]) +eval instant at 3m rate(metric[3m1s]) {} {{schema:2 sum:1 count:2 buckets:[2] counter_reset_hint:gauge}} -eval instant at 3m increase(metric[3m]) - {} {{schema:2 sum:180 count:360 buckets:[360] counter_reset_hint:gauge}} +eval instant at 3m increase(metric[3m1s]) + {} {{schema:2 sum:181 count:362 buckets:[362] counter_reset_hint:gauge}} diff --git a/pkg/streamingpromql/testdata/ours/subqueries.test 
b/pkg/streamingpromql/testdata/ours/subqueries.test index fe454c43c05..3f757795acd 100644 --- a/pkg/streamingpromql/testdata/ours/subqueries.test +++ b/pkg/streamingpromql/testdata/ours/subqueries.test @@ -8,28 +8,28 @@ load 1m metric{type="histograms"} {{count:0}} {{count:4}} {{count:3}} {{count:6}} {{count:-1}} {{count:10}} metric{type="mixed"} 0 4 3 6 {{count:-1}} {{count:10}} -# Test that both ends of the time range selected are inclusive. -eval instant at 4m59s count_over_time(metric[4m:30s]) +# Test that the time range selected is left-open and right-closed. +eval instant at 4m59s count_over_time(metric[4m1s:30s]) {type="floats"} 8 {type="histograms"} 8 {type="mixed"} 8 -eval instant at 5m count_over_time(metric[4m:30s]) +eval instant at 5m count_over_time(metric[4m1s:30s]) {type="floats"} 9 {type="histograms"} 9 {type="mixed"} 9 -eval instant at 5m count_over_time(metric[3m59s:30s]) +eval instant at 5m count_over_time(metric[4m:30s]) {type="floats"} 8 {type="histograms"} 8 {type="mixed"} 8 -eval range from 4m59s to 5m step 1s count_over_time(metric[4m:30s]) +eval range from 4m59s to 5m step 1s count_over_time(metric[4m1s:30s]) {type="floats"} 8 9 {type="histograms"} 8 9 {type="mixed"} 8 9 -eval range from 5m to 5m1s step 1s count_over_time(metric[3m59s:30s]) +eval range from 5m to 5m1s step 1s count_over_time(metric[4m:30s]) {type="floats"} 8 8 {type="histograms"} 8 8 {type="mixed"} 8 8 @@ -41,9 +41,9 @@ eval instant at 5m last_over_time(metric[4m:3m]) metric{type="mixed"} 6 eval range from 0 to 14m step 1m last_over_time(metric[4m:3m]) - metric{type="floats"} 0 0 0 6 6 6 10 10 10 10 10 10 10 10 _ - metric{type="histograms"} {{count:0}} {{count:0}} {{count:0}} {{count:6}} {{count:6}} {{count:6}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} _ - metric{type="mixed"} 0 0 0 6 6 6 {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} _ + metric{type="floats"} 0 0 0 6 6 6 10 10 10 10 10 10 10 _ _ + metric{type="histograms"} {{count:0}} {{count:0}} {{count:0}} {{count:6}} {{count:6}} {{count:6}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} _ _ + metric{type="mixed"} 0 0 0 6 6 6 {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} {{count:10}} _ _ # Subquery with @ eval instant at 5m last_over_time(metric[2m:1m] @ 1m) @@ -86,12 +86,12 @@ eval range from 0 to 5m step 1m last_over_time(metric[2m:1m] offset 1m) metric{type="mixed"} _ 0 4 3 6 {{count:-1}} # Subquery range smaller than subquery step -eval instant at 5m last_over_time(metric[1m:2m]) +eval instant at 5m last_over_time(metric[1m1s:2m]) metric{type="floats"} -1 metric{type="histograms"} {{count:-1}} metric{type="mixed"} {{count:-1}} -eval range from 0 to 5m step 1m last_over_time(metric[1m:2m]) +eval range from 0 to 5m step 1m last_over_time(metric[1m1s:2m]) metric{type="floats"} 0 0 3 3 -1 -1 metric{type="histograms"} {{count:0}} {{count:0}} {{count:3}} {{count:3}} {{count:-1}} {{count:-1}} metric{type="mixed"} 0 0 3 3 {{count:-1}} {{count:-1}} @@ -119,13 +119,13 @@ clear load 1m metric 0 1 2 3 4 -eval range from 0 to 4m step 15s sum_over_time(metric[2m:30s]) +eval range from 0 to 4m step 15s sum_over_time(metric[2m1s:30s]) {} 0 0 0 0 1 1 2 2 4 4 6 6 9 8 11 10 14 -eval range from 0 to 4m step 20s sum_over_time(sum_over_time(metric[2m:30s])[3m:15s]) +eval range from 0 to 4m step 20s sum_over_time(sum_over_time(metric[2m1s:30s])[3m1s:15s]) {} 0 0 0 1 2 4 10 
14 20 35 43 54 78 -eval range from 0 to 4m step 3m sum_over_time(sum_over_time(sum_over_time(metric[2m:30s])[3m:15s])[4m:20s]) +eval range from 0 to 4m step 3m sum_over_time(sum_over_time(sum_over_time(metric[2m1s:30s])[3m1s:15s])[4m1s:20s]) {} 0 86 eval range from 0 to 4m step 15s last_over_time((metric > Inf)[20s:10s]) diff --git a/pkg/streamingpromql/testdata/upstream/aggregators.test b/pkg/streamingpromql/testdata/upstream/aggregators.test index 3caed5370a3..39434a9797e 100644 --- a/pkg/streamingpromql/testdata/upstream/aggregators.test +++ b/pkg/streamingpromql/testdata/upstream/aggregators.test @@ -234,13 +234,28 @@ load 5m http_requests{job="api-server", instance="0", group="canary"} NaN http_requests{job="api-server", instance="1", group="canary"} 3 http_requests{job="api-server", instance="2", group="canary"} 4 + http_requests_histogram{job="api-server", instance="3", group="canary"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} eval instant at 0m max(http_requests) {} 4 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m max({job="api-server"}) + {} 4 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 0m max(http_requests_histogram) + eval instant at 0m min(http_requests) {} 1 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m min({job="api-server"}) + {} 1 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 0m min(http_requests_histogram) + eval instant at 0m max by (group) (http_requests) {group="production"} 2 {group="canary"} 4 @@ -255,7 +270,7 @@ clear load 5m http_requests{job="api-server", instance="0", group="production"} 0+10x10 http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 http_requests{job="app-server", instance="0", group="production"} 0+50x10 @@ -356,21 +371,21 @@ load 5m version{job="app-server", instance="1", group="canary"} 7 # Unsupported by streaming engine. -# eval instant at 5m count_values("version", version) +# eval instant at 1m count_values("version", version) # {version="6"} 5 # {version="7"} 2 # {version="8"} 2 # Unsupported by streaming engine. -# eval instant at 5m count_values(((("version"))), version) +# eval instant at 1m count_values(((("version"))), version) # {version="6"} 5 # {version="7"} 2 # {version="8"} 2 # Unsupported by streaming engine. -# eval instant at 5m count_values without (instance)("version", version) +# eval instant at 1m count_values without (instance)("version", version) # {job="api-server", group="production", version="6"} 3 # {job="api-server", group="canary", version="8"} 2 # {job="app-server", group="production", version="6"} 2 @@ -378,14 +393,14 @@ load 5m # Overwrite label with output. Don't do this. # Unsupported by streaming engine. 
-# eval instant at 5m count_values without (instance)("job", version) +# eval instant at 1m count_values without (instance)("job", version) # {job="6", group="production"} 5 # {job="8", group="canary"} 2 # {job="7", group="canary"} 2 # Overwrite label with output. Don't do this. # Unsupported by streaming engine. -# eval instant at 5m count_values by (job, group)("job", version) +# eval instant at 1m count_values by (job, group)("job", version) # {job="6", group="production"} 5 # {job="8", group="canary"} 2 # {job="7", group="canary"} 2 @@ -403,6 +418,7 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} foo .8 # Unsupported by streaming engine. @@ -411,6 +427,17 @@ load 10s # {test="three samples"} 1.6 # {test="uneven samples"} 2.8 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +# Unsupported by streaming engine. +# eval_info instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) +# {test="two samples"} 0.8 +# {test="three samples"} 1.6 +# {test="uneven samples"} 2.8 + +# The histogram is ignored here so there is no result but it has an info annotation now. +# Unsupported by streaming engine. +# eval_info instant at 1m quantile(0.8, data_histogram) + # Bug #5276. # Unsupported by streaming engine. # eval instant at 1m quantile without(point)(scalar(foo), data) @@ -599,3 +626,166 @@ clear # #eval instant at 1m count(topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without())) # {} 1 + +clear + +# Test stddev produces consistent results regardless the order the data is loaded in. +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m stddev(series) + {} 0.5 + +eval_info instant at 0m stdvar(series) + {} 0.25 + +# The histogram is ignored here so there is no result but it has an info annotation now. 
+eval_info instant at 0m stddev({label="c"}) + +eval_info instant at 0m stdvar({label="c"}) + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + +clear + +load 5m + series{label="a"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} 0.5 + +eval instant at 0m stdvar(series) + {} 0.25 + +eval instant at 0m stddev by (label) (series) + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} NaN + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} inf + +eval instant at 0m stddev (series) + {} NaN + +eval instant at 0m stdvar (series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} inf + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series inf + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN diff --git a/pkg/streamingpromql/testdata/upstream/at_modifier.test b/pkg/streamingpromql/testdata/upstream/at_modifier.test index 1dc18eff70d..da939526531 100644 --- a/pkg/streamingpromql/testdata/upstream/at_modifier.test +++ b/pkg/streamingpromql/testdata/upstream/at_modifier.test @@ -126,45 +126,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100) # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. # Inner most sum=1+2+...+10=55. -# With [100s:25s] subquery, it's 55*5. +# With [100s:25s] subquery, it's 55*4. eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50) - {job="1"} 275 + {job="1"} 220 # Nested subqueries with different timestamps on both. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. -# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times. +# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times. 
eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000) - {job="1"} 1100 + {job="1"} 660 # Testing the inner subquery timestamp since vector selector does not have @. # Inner sum for subquery [100s:25s] @ 50 are -# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9. -# This sum of 11 is repeated 4 times by outer subquery. +# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5. +# This sum of 7 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200) - {job="1"} 44 + {job="1"} 21 # Inner sum for subquery [100s:25s] @ 200 are -# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20. -# This sum of 116 is repeated 4 times by outer subquery. +# at 125=12, at 150=15, at 175=17, at 200=20. +# This sum of 64 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50) - {job="1"} 464 + {job="1"} 192 # Nested subqueries with timestamp only on outer subquery. # Outer most subquery: -# at 900=783 -# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87 -# at 925=537 -# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91 -# at 950=828 -# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92 -# at 975=567 -# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95 -# at 1000=873 -# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97 +# at 925=360 +# inner subquery: at 905=90+89, at 915=91+90 +# at 950=372 +# inner subquery: at 930=93+92, at 940=94+93 +# at 975=380 +# inner subquery: at 955=95+94, at 965=96+95 +# at 1000=392 +# inner subquery: at 980=98+97, at 990=99+98 eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000) - {job="1"} 3588 + {job="1"} 1504 # minute is counted on the value of the sample. # Unsupported by streaming engine. @@ -192,37 +190,37 @@ eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s # minute is counted on the value of the sample. # Unsupported by streaming engine. # eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s]) -# {job="1"} 22 -# {job="2"} 55 +# {job="1"} 20 +# {job="2"} 50 # If nothing passed, minute() takes eval time. # Here the eval time is determined by the subquery. # [50m:1m] at 6000, i.e. 100m, is 50m to 100m. -# sum=50+51+52+...+59+0+1+2+...+40. +# sum=51+52+...+59+0+1+2+...+40. # Unsupported by streaming engine. # eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000) -# {} 1365 +# {} 1315 -# sum=45+46+47+...+59+0+1+2+...+35. +# sum=46+47+...+59+0+1+2+...+35. # Unsupported by streaming engine. # eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m) -# {} 1410 +# {} 1365 # time() is the eval time which is determined by subquery here. -# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2. +# 2901+...+3000 = (3000*3001 - 2900*2901)/2. # Unsupported by streaming engine. # eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000) -# {} 297950 +# {} 295050 -# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2. +# 2301+...+2400 = (2400*2401 - 2300*2301)/2. # Unsupported by streaming engine. # eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s) -# {} 237350 +# {} 235050 # timestamp() takes the time of the sample and not the evaluation time. # Unsupported by streaming engine.
# eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000) -# {job="1"} 110 +# {job="1"} 100 # The result of inner timestamp() will have the timestamp as the # eval time, hence entire expression is not step invariant and depends on eval time. diff --git a/pkg/streamingpromql/testdata/upstream/functions.test b/pkg/streamingpromql/testdata/upstream/functions.test index 12bf8f0711d..e97d2a11025 100644 --- a/pkg/streamingpromql/testdata/upstream/functions.test +++ b/pkg/streamingpromql/testdata/upstream/functions.test @@ -11,11 +11,13 @@ load 5m # Tests for resets(). eval instant at 50m resets(http_requests[5m]) + +eval instant at 50m resets(http_requests[10m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 -eval instant at 50m resets(http_requests[300]) +eval instant at 50m resets(http_requests[600]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 @@ -26,6 +28,11 @@ eval instant at 50m resets(http_requests[20m]) {path="/biz"} 0 eval instant at 50m resets(http_requests[30m]) + {path="/foo"} 1 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m resets(http_requests[32m]) {path="/foo"} 2 {path="/bar"} 1 {path="/biz"} 0 @@ -39,28 +46,30 @@ eval instant at 50m resets(nonexistent_metric[50m]) # Tests for changes(). eval instant at 50m changes(http_requests[5m]) + +eval instant at 50m changes(http_requests[6m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 eval instant at 50m changes(http_requests[20m]) - {path="/foo"} 3 - {path="/bar"} 3 + {path="/foo"} 2 + {path="/bar"} 2 {path="/biz"} 0 eval instant at 50m changes(http_requests[30m]) - {path="/foo"} 4 - {path="/bar"} 5 - {path="/biz"} 1 + {path="/foo"} 3 + {path="/bar"} 4 + {path="/biz"} 0 eval instant at 50m changes(http_requests[50m]) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes((http_requests[50m])) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes(nonexistent_metric[50m]) @@ -71,7 +80,7 @@ load 5m x{a="b"} NaN NaN NaN x{a="c"} 0 NaN 0 -eval instant at 15m changes(x[15m]) +eval instant at 15m changes(x[20m]) {a="b"} 0 {a="c"} 2 @@ -80,14 +89,14 @@ clear # Tests for increase(). load 5m http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests{path="/bar"} 0+18x5 0+18x5 http_requests{path="/dings"} 10+10x10 http_requests{path="/bumms"} 1+10x10 # Tests for increase(). eval instant at 50m increase(http_requests[50m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 160 {path="/dings"} 100 {path="/bumms"} 100 @@ -100,7 +109,7 @@ eval instant at 50m increase(http_requests[50m]) # value, and therefore the extrapolation happens only by 30s. eval instant at 50m increase(http_requests[100m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 162 {path="/dings"} 105 {path="/bumms"} 101 @@ -120,15 +129,17 @@ clear # Tests for rate(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 + testcounter_reset_middle 0+27x4 0+27x5 testcounter_reset_end 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). eval instant at 50m rate(testcounter_reset_middle[50m]) - {} 0.03 + {} 0.08 # Counter resets at end of range are ignored by rate(). eval instant at 50m rate(testcounter_reset_end[5m]) + +eval instant at 50m rate(testcounter_reset_end[6m]) {} 0 clear @@ -252,29 +263,29 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=3000+3600: 76.81818181818181 # Unsupported by streaming engine. 
# eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) -# {} 76.81818181818181 +# {} 70 # Unsupported by streaming engine. # eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) -# {} 76.81818181818181 +# {} 70 # intercept at t = 3000+3600 = 6600 # Unsupported by streaming engine. -# eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +# eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) # {} 76.81818181818181 # Unsupported by streaming engine. -# eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h) +# eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) # {} 76.81818181818181 # intercept at t = 600+3600 = 4200 # Unsupported by streaming engine. -# eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +# eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) # {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 # Unsupported by streaming engine. -# eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +# eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) # {} 89.54545454545455 # With http_requests, there is a sample value exactly at the end of @@ -496,7 +507,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -533,7 +544,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -682,7 +693,7 @@ load 5m # node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 # node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -# Tests for holt_winters +# Tests for double_exponential_smoothing clear # positive trends @@ -693,7 +704,7 @@ load 10s http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 # Unsupported by streaming engine. -# eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +# eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) # {job="api-server", instance="0", group="production"} 8000 # {job="api-server", instance="1", group="production"} 16000 # {job="api-server", instance="0", group="canary"} 24000 @@ -708,7 +719,7 @@ load 10s http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000 # Unsupported by streaming engine. 
-# eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +# eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) # {job="api-server", instance="0", group="production"} 0 # {job="api-server", instance="1", group="production"} -16000 # {job="api-server", instance="0", group="canary"} 24000 @@ -732,10 +743,10 @@ load 10s metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 metric10 -9.988465674311579e+307 9.988465674311579e+307 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 55s avg_over_time(metric[1m]) {} 3 -eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m]) +eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m]) {} 3 eval instant at 1m avg_over_time(metric2[1m]) @@ -802,8 +813,8 @@ eval instant at 1m avg_over_time(metric8[1m]) {} 9.988465674311579e+307 # This overflows float64. -eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m]) - {} Inf +eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m]) + {} +Inf eval instant at 1m avg_over_time(metric9[1m]) {} -9.988465674311579e+307 @@ -812,10 +823,16 @@ eval instant at 1m avg_over_time(metric9[1m]) eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m]) {} -Inf -eval instant at 1m avg_over_time(metric10[1m]) +eval instant at 45s avg_over_time(metric10[1m]) {} 0 -eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m]) +eval instant at 1m avg_over_time(metric10[2m]) + {} 0 + +eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m]) + {} 0 + +eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m]) {} 0 # Test if very big intermediate values cause loss of detail. @@ -823,10 +840,10 @@ clear load 10s metric 1 1e100 1 -1e100 -eval instant at 1m sum_over_time(metric[1m]) +eval instant at 1m sum_over_time(metric[2m]) {} 2 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 1m avg_over_time(metric[2m]) {} 0.5 # Tests for stddev_over_time and stdvar_over_time. @@ -835,15 +852,15 @@ load 10s metric 0 8 8 2 3 # Unsupported by streaming engine. -# eval instant at 1m stdvar_over_time(metric[1m]) +# eval instant at 1m stdvar_over_time(metric[2m]) # {} 10.56 # Unsupported by streaming engine. -# eval instant at 1m stddev_over_time(metric[1m]) +# eval instant at 1m stddev_over_time(metric[2m]) # {} 3.249615 # Unsupported by streaming engine. -# eval instant at 1m stddev_over_time((metric[1m])) +# eval instant at 1m stddev_over_time((metric[2m])) # {} 3.249615 # Tests for stddev_over_time and stdvar_over_time #4927. @@ -877,49 +894,49 @@ load 10s data{test="uneven samples"} 0 1 4 # Unsupported by streaming engine. -# eval instant at 1m quantile_over_time(0, data[1m]) +# eval instant at 1m quantile_over_time(0, data[2m]) # {test="two samples"} 0 # {test="three samples"} 0 # {test="uneven samples"} 0 # Unsupported by streaming engine. -# eval instant at 1m quantile_over_time(0.5, data[1m]) +# eval instant at 1m quantile_over_time(0.5, data[2m]) # {test="two samples"} 0.5 # {test="three samples"} 1 # {test="uneven samples"} 1 # Unsupported by streaming engine. -# eval instant at 1m quantile_over_time(0.75, data[1m]) +# eval instant at 1m quantile_over_time(0.75, data[2m]) # {test="two samples"} 0.75 # {test="three samples"} 1.5 # {test="uneven samples"} 2.5 # Unsupported by streaming engine. 
-# eval instant at 1m quantile_over_time(0.8, data[1m]) +# eval instant at 1m quantile_over_time(0.8, data[2m]) # {test="two samples"} 0.8 # {test="three samples"} 1.6 # {test="uneven samples"} 2.8 # Unsupported by streaming engine. -# eval instant at 1m quantile_over_time(1, data[1m]) +# eval instant at 1m quantile_over_time(1, data[2m]) # {test="two samples"} 1 # {test="three samples"} 2 # {test="uneven samples"} 4 # Unsupported by streaming engine. -# eval_warn instant at 1m quantile_over_time(-1, data[1m]) +# eval_warn instant at 1m quantile_over_time(-1, data[2m]) # {test="two samples"} -Inf # {test="three samples"} -Inf # {test="uneven samples"} -Inf # Unsupported by streaming engine. -# eval_warn instant at 1m quantile_over_time(2, data[1m]) +# eval_warn instant at 1m quantile_over_time(2, data[2m]) # {test="two samples"} +Inf # {test="three samples"} +Inf # {test="uneven samples"} +Inf # Unsupported by streaming engine. -# eval_warn instant at 1m (quantile_over_time(2, (data[1m]))) +# eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) # {test="two samples"} +Inf # {test="three samples"} +Inf # {test="uneven samples"} +Inf @@ -1052,21 +1069,21 @@ load 10s data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN -eval instant at 1m min_over_time(data[1m]) +eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 {type="some_nan"} 0 {type="some_nan2"} 1 {type="some_nan3"} 0 {type="only_nan"} NaN -eval instant at 1m max_over_time(data[1m]) +eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 {type="some_nan2"} 2 {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[1m]) +eval instant at 1m last_over_time(data[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 @@ -1183,16 +1200,24 @@ load 1m # Unsupported by streaming engine. # eval instant at 15m absent_over_time(http_requests[5m]) +# {} 1 # Unsupported by streaming engine. -# eval instant at 16m absent_over_time(http_requests[5m]) -# {} 1 +# eval instant at 15m absent_over_time(http_requests[10m]) # Unsupported by streaming engine. # eval instant at 16m absent_over_time(http_requests[6m]) +# {} 1 + +# Unsupported by streaming engine. +# eval instant at 16m absent_over_time(http_requests[16m]) # Unsupported by streaming engine. # eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) +# {} 1 + +# Unsupported by streaming engine. +# eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m]) # Unsupported by streaming engine. 
# eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m]) @@ -1255,17 +1280,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) eval instant at 15m present_over_time(http_requests[5m]) + +eval instant at 15m present_over_time(http_requests[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[5m]) - eval instant at 16m present_over_time(http_requests[6m]) + +eval instant at 16m present_over_time(http_requests[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 eval instant at 16m present_over_time(httpd_handshake_failures_total[1m]) - {instance="127.0.0.1", job="node"} 1 eval instant at 16m present_over_time({instance="127.0.0.1"}[5m]) {instance="127.0.0.1",job="node"} 1 @@ -1286,59 +1312,59 @@ load 5m exp_root_log{l="x"} 10 exp_root_log{l="y"} 20 -eval instant at 5m exp(exp_root_log) +eval instant at 1m exp(exp_root_log) {l="x"} 22026.465794806718 {l="y"} 485165195.4097903 -eval instant at 5m exp(exp_root_log - 10) +eval instant at 1m exp(exp_root_log - 10) {l="y"} 22026.465794806718 {l="x"} 1 -eval instant at 5m exp(exp_root_log - 20) +eval instant at 1m exp(exp_root_log - 20) {l="x"} 4.5399929762484854e-05 {l="y"} 1 -eval instant at 5m ln(exp_root_log) +eval instant at 1m ln(exp_root_log) {l="x"} 2.302585092994046 {l="y"} 2.995732273553991 -eval instant at 5m ln(exp_root_log - 10) +eval instant at 1m ln(exp_root_log - 10) {l="y"} 2.302585092994046 {l="x"} -Inf -eval instant at 5m ln(exp_root_log - 20) +eval instant at 1m ln(exp_root_log - 20) {l="y"} -Inf {l="x"} NaN -eval instant at 5m exp(ln(exp_root_log)) +eval instant at 1m exp(ln(exp_root_log)) {l="y"} 20 {l="x"} 10 -eval instant at 5m sqrt(exp_root_log) +eval instant at 1m sqrt(exp_root_log) {l="x"} 3.1622776601683795 {l="y"} 4.47213595499958 -eval instant at 5m log2(exp_root_log) +eval instant at 1m log2(exp_root_log) {l="x"} 3.3219280948873626 {l="y"} 4.321928094887363 -eval instant at 5m log2(exp_root_log - 10) +eval instant at 1m log2(exp_root_log - 10) {l="y"} 3.3219280948873626 {l="x"} -Inf -eval instant at 5m log2(exp_root_log - 20) +eval instant at 1m log2(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf -eval instant at 5m log10(exp_root_log) +eval instant at 1m log10(exp_root_log) {l="x"} 1 {l="y"} 1.301029995663981 -eval instant at 5m log10(exp_root_log - 10) +eval instant at 1m log10(exp_root_log - 10) {l="y"} 1 {l="x"} -Inf -eval instant at 5m log10(exp_root_log - 20) +eval instant at 1m log10(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf @@ -1352,3 +1378,12 @@ load 1m # Unsupported by streaming engine. # eval range from 0 to 61s step 1s timestamp(metric) # {} 0x59 60 60 + +clear + +# Check round with mixed data types +load 1m + mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} + +eval range from 0 to 5m step 1m round(mixed_metric) + {} _ 1 2 3 diff --git a/pkg/streamingpromql/testdata/upstream/histograms.test b/pkg/streamingpromql/testdata/upstream/histograms.test index 43dff1594d8..41a7666b5f6 100644 --- a/pkg/streamingpromql/testdata/upstream/histograms.test +++ b/pkg/streamingpromql/testdata/upstream/histograms.test @@ -119,7 +119,7 @@ eval instant at 50m testhistogram3_sum # {start="negative"} 0 # Unsupported by streaming engine. 
-# eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) +# eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) # {start="positive"} 0.6363636363636364 # {start="negative"} 0 @@ -129,7 +129,7 @@ eval instant at 50m testhistogram3_sum eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count {start="positive"} 0.6363636363636364 -eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m]) +eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m]) {start="positive"} 0.6363636363636364 # Test histogram_quantile, native and classic. @@ -277,34 +277,33 @@ eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate # {start="negative"} 0.3 # More realistic with rates. - # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m])) +# eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m])) # {start="positive"} 0.048 # {start="negative"} -0.2 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +# eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) # {start="positive"} 0.048 # {start="negative"} -0.2 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m])) +# eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m])) # {start="positive"} 0.15 # {start="negative"} -0.15 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +# eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) # {start="positive"} 0.15 # {start="negative"} -0.15 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m])) +# eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m])) # {start="positive"} 0.72 # {start="negative"} 0.3 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +# eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) # {start="positive"} 0.72 # {start="negative"} 0.3 @@ -362,138 +361,135 @@ eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate # don't require aggregation by le. # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m]))) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m]))) # {} 0.075 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) # {} 0.075 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m]))) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m]))) # {} 0.1277777777777778 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) # {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. # Unsupported by streaming engine. 
-# eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m]))) +# eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m]))) # {} 0.075 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +# eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) # {} 0.075 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m]))) +# eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m]))) # {} 0.12777777777777778 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +# eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) # {} 0.12777777777777778 # Aggregated histogram: By instance. # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance)) # {instance="ins1"} 0.075 # {instance="ins2"} 0.075 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) # {instance="ins1"} 0.075 # {instance="ins2"} 0.075 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance)) # {instance="ins1"} 0.1333333333 # {instance="ins2"} 0.125 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) # {instance="ins1"} 0.1333333333 # {instance="ins2"} 0.125 # Aggregated histogram: By job. - # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job)) # {job="job1"} 0.1 # {job="job2"} 0.0642857142857143 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) # {job="job1"} 0.1 # {job="job2"} 0.0642857142857143 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job)) # {job="job1"} 0.14 # {job="job2"} 0.1125 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) # {job="job1"} 0.14 # {job="job2"} 0.1125 # Aggregated histogram: By job and instance. - # Unsupported by streaming engine. 
-# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance)) # {instance="ins1", job="job1"} 0.11 # {instance="ins2", job="job1"} 0.09 # {instance="ins1", job="job2"} 0.06 # {instance="ins2", job="job2"} 0.0675 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +# eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) # {instance="ins1", job="job1"} 0.11 # {instance="ins2", job="job1"} 0.09 # {instance="ins1", job="job2"} 0.06 # {instance="ins2", job="job2"} 0.0675 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance)) # {instance="ins1", job="job1"} 0.15 # {instance="ins2", job="job1"} 0.1333333333333333 # {instance="ins1", job="job2"} 0.1 # {instance="ins2", job="job2"} 0.1166666666666667 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +# eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) # {instance="ins1", job="job1"} 0.15 # {instance="ins2", job="job1"} 0.1333333333333333 # {instance="ins1", job="job2"} 0.1 # {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. - # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m])) +# eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m])) # {instance="ins1", job="job1"} 0.11 # {instance="ins2", job="job1"} 0.09 # {instance="ins1", job="job2"} 0.06 # {instance="ins2", job="job2"} 0.0675 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +# eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) # {instance="ins1", job="job1"} 0.11 # {instance="ins2", job="job1"} 0.09 # {instance="ins1", job="job2"} 0.06 # {instance="ins2", job="job2"} 0.0675 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m])) +# eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m])) # {instance="ins1", job="job1"} 0.15 # {instance="ins2", job="job1"} 0.13333333333333333 # {instance="ins1", job="job2"} 0.1 # {instance="ins2", job="job2"} 0.11666666666666667 # Unsupported by streaming engine. 
-# eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +# eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) # {instance="ins1", job="job1"} 0.15 # {instance="ins2", job="job1"} 0.13333333333333333 # {instance="ins1", job="job2"} 0.1 @@ -503,6 +499,25 @@ eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate eval instant at 50m sum(request_duration_seconds) {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} +eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) + {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} + +eval instant at 50m avg(request_duration_seconds) + {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} + +# To verify the result above, calculate from classic histogram as well. +eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) + {} 25 + +eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) + {} 22.5 + +eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) + {} 15 + +eval instant at 50m count(request_duration_seconds) + {} 4 + # A histogram with nonmonotonic bucket counts. This may happen when recording # rule evaluation or federation races scrape ingestion, causing some buckets # counts to be derived from fewer samples. @@ -530,22 +545,22 @@ load 5m # Buckets with different representations of the same upper bound. # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) +# eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m])) # {instance="ins1", job="job1"} 0.15 # {instance="ins2", job="job1"} NaN # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.5, rate(mixed[5m])) +# eval instant at 50m histogram_quantile(0.5, rate(mixed[10m])) # {instance="ins1", job="job1"} 0.2 # {instance="ins2", job="job1"} NaN # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +# eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m])) # {instance="ins1", job="job1"} 0.2 # {instance="ins2", job="job1"} NaN # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +# eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m])) # {instance="ins1", job="job1"} 0.2 # {instance="ins2", job="job1"} NaN @@ -555,7 +570,7 @@ load_with_nhcb 5m empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 # Unsupported by streaming engine. -# eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +# eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m])) # {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set. @@ -598,3 +613,37 @@ eval instant at 5m rate(const_histogram[5m]) # Unsupported by streaming engine. 
# eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) # {} NaN + +load_with_nhcb 1m + histogram_over_time_bucket{le="0"} 0 1 3 9 + histogram_over_time_bucket{le="1"} 2 3 3 9 + histogram_over_time_bucket{le="2"} 3 8 5 10 + histogram_over_time_bucket{le="4"} 3 10 6 18 + +# Test custom buckets with sum_over_time, avg_over_time. +eval instant at 3m sum_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:37 custom_values:[0 1 2 4] buckets:[13 4 9 11]}} + +eval instant at 3m avg_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:9.25 custom_values:[0 1 2 4] buckets:[3.25 1 2.25 2.75]}} + +# Test custom buckets with counter reset +load_with_nhcb 5m + histogram_with_reset_bucket{le="1"} 1 3 9 + histogram_with_reset_bucket{le="2"} 3 3 9 + histogram_with_reset_bucket{le="4"} 8 5 12 + histogram_with_reset_bucket{le="8"} 10 6 18 + histogram_with_reset_sum{} 36 16 61 + +eval instant at 10m increase(histogram_with_reset[15m]) + {} {{schema:-53 count:27 sum:91.5 custom_values:[1 2 4 8] counter_reset_hint:gauge buckets:[13.5 0 4.5 9]}} + +# Unsupported by streaming engine. +# eval instant at 10m resets(histogram_with_reset[15m]) +# {} 1 + +eval instant at 10m histogram_count(increase(histogram_with_reset[15m])) + {} 27 + +eval instant at 10m histogram_sum(increase(histogram_with_reset[15m])) + {} 91.5 diff --git a/pkg/streamingpromql/testdata/upstream/name_label_dropping.test b/pkg/streamingpromql/testdata/upstream/name_label_dropping.test index 87cc11ca791..4e58d2644fe 100644 --- a/pkg/streamingpromql/testdata/upstream/name_label_dropping.test +++ b/pkg/streamingpromql/testdata/upstream/name_label_dropping.test @@ -9,93 +9,93 @@ load 5m another_metric{env="1"} 60 120 180 # Does not drop __name__ for vector selector -eval instant at 15m metric{env="1"} +eval instant at 10m metric{env="1"} metric{env="1"} 120 # Drops __name__ for unary operators -eval instant at 15m -metric +eval instant at 10m -metric {env="1"} -120 # Drops __name__ for binary operators -eval instant at 15m metric + another_metric +eval instant at 10m metric + another_metric {env="1"} 300 # Does not drop __name__ for binary comparison operators -eval instant at 15m metric <= another_metric +eval instant at 10m metric <= another_metric metric{env="1"} 120 # Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 15m metric <= bool another_metric +eval instant at 10m metric <= bool another_metric {env="1"} 1 # Drops __name__ for vector-scalar operations -eval instant at 15m metric * 2 +eval instant at 10m metric * 2 {env="1"} 240 # Drops __name__ for instant-vector functions -eval instant at 15m clamp(metric, 0, 100) +eval instant at 10m clamp(metric, 0, 100) {env="1"} 100 # Drops __name__ for round function -eval instant at 15m round(metric) +eval instant at 10m round(metric) {env="1"} 120 # Drops __name__ for range-vector functions -eval instant at 15m rate(metric{env="1"}[10m]) +eval instant at 10m rate(metric{env="1"}[10m]) {env="1"} 0.2 # Does not drop __name__ for last_over_time function -eval instant at 15m last_over_time(metric{env="1"}[10m]) +eval instant at 10m last_over_time(metric{env="1"}[10m]) metric{env="1"} 120 # Drops name for other _over_time functions -eval instant at 15m max_over_time(metric{env="1"}[10m]) +eval instant at 10m max_over_time(metric{env="1"}[10m]) {env="1"} 120 # Allows relabeling (to-be-dropped) __name__ via label_replace # Unsupported by streaming engine. 
-# eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") +# eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") # {my_name="rate_metric", env="1"} 0.2 # {my_name="rate_another_metric", env="1"} 0.2 # Allows preserving __name__ via label_replace # Unsupported by streaming engine. -# eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") +# eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") # rate_metric{env="1"} 0.2 # rate_another_metric{env="1"} 0.2 # Allows relabeling (to-be-dropped) __name__ via label_join # Unsupported by streaming engine. -# eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") +# eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") # {my_name="metric", env="1"} 0.2 # {my_name="another_metric", env="1"} 0.2 # Allows preserving __name__ via label_join # Unsupported by streaming engine. -# eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") +# eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") # metric_1{env="1"} 0.2 # another_metric_1{env="1"} 0.2 # Does not drop metric names fro aggregation operators -eval instant at 15m sum by (__name__, env) (metric{env="1"}) +eval instant at 10m sum by (__name__, env) (metric{env="1"}) metric{env="1"} 120 # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) # This is an accidental side effect of delayed __name__ label dropping # Unsupported by streaming engine. -# eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m])) +# eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) # Aggregation operators aggregate metrics with same labelset and to-be-dropped names # This is an accidental side effect of delayed __name__ label dropping # Unsupported by streaming engine. -# eval instant at 15m sum(rate({env="1"}[10m])) by (env) +# eval instant at 10m sum(rate({env="1"}[10m])) by (env) # {env="1"} 0.4 # Aggregationk operators propagate __name__ label dropping information # Unsupported by streaming engine. -# eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"})) +# eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) # metric{env="1"} 120 # Unsupported by streaming engine. -# eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +# eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) # {env="1"} 0.2 diff --git a/pkg/streamingpromql/testdata/upstream/native_histograms.test b/pkg/streamingpromql/testdata/upstream/native_histograms.test index 3da3301493e..88771bce739 100644 --- a/pkg/streamingpromql/testdata/upstream/native_histograms.test +++ b/pkg/streamingpromql/testdata/upstream/native_histograms.test @@ -7,62 +7,65 @@ load 5m empty_histogram {{}} -eval instant at 5m empty_histogram +eval instant at 1m empty_histogram {__name__="empty_histogram"} {{}} -eval instant at 5m histogram_count(empty_histogram) +eval instant at 1m histogram_count(empty_histogram) {} 0 -eval instant at 5m histogram_sum(empty_histogram) +eval instant at 1m histogram_sum(empty_histogram) {} 0 # Unsupported by streaming engine. -# eval instant at 5m histogram_avg(empty_histogram) +# eval instant at 1m histogram_avg(empty_histogram) # {} NaN # Unsupported by streaming engine. 
-# eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram) +# eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram) # {} NaN # Unsupported by streaming engine. -# eval instant at 5m histogram_fraction(0, 8, empty_histogram) +# eval instant at 1m histogram_fraction(0, 8, empty_histogram) # {} NaN - +clear # buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4). load 5m single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}} # histogram_count extracts the count property from the histogram. -eval instant at 5m histogram_count(single_histogram) +eval instant at 1m histogram_count(single_histogram) {} 4 # histogram_sum extracts the sum property from the histogram. -eval instant at 5m histogram_sum(single_histogram) +eval instant at 1m histogram_sum(single_histogram) {} 5 # histogram_avg calculates the average from sum and count properties. # Unsupported by streaming engine. -# eval instant at 5m histogram_avg(single_histogram) +# eval instant at 1m histogram_avg(single_histogram) # {} 1.25 # We expect half of the values to fall in the range 1 < x <= 2. # Unsupported by streaming engine. -# eval instant at 5m histogram_fraction(1, 2, single_histogram) +# eval instant at 1m histogram_fraction(1, 2, single_histogram) # {} 0.5 # We expect all values to fall in the range 0 < x <= 8. # Unsupported by streaming engine. -# eval instant at 5m histogram_fraction(0, 8, single_histogram) +# eval instant at 1m histogram_fraction(0, 8, single_histogram) # {} 1 -# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2. +# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to +# exponential interpolation, i.e. the "midpoint" within range 1 < x <= +# 2 is assumed where the bucket boundary would be if we increased the +# resolution of the histogram by one step. # Unsupported by streaming engine. -# eval instant at 5m histogram_quantile(0.5, single_histogram) -# {} 1.5 - +# eval instant at 1m histogram_quantile(0.5, single_histogram) +# {} 1.414213562373095 +clear # Repeat the same histogram 10 times. load 5m @@ -82,9 +85,10 @@ eval instant at 5m histogram_sum(multi_histogram) # eval instant at 5m histogram_fraction(1, 2, multi_histogram) # {} 0.5 +# See explanation for exponential interpolation above. # Unsupported by streaming engine. # eval instant at 5m histogram_quantile(0.5, multi_histogram) -# {} 1.5 +# {} 1.414213562373095 # Each entry should look the same as the first. @@ -102,11 +106,12 @@ eval instant at 50m histogram_sum(multi_histogram) # eval instant at 50m histogram_fraction(1, 2, multi_histogram) # {} 0.5 +# See explanation for exponential interpolation above. # Unsupported by streaming engine. # eval instant at 50m histogram_quantile(0.5, multi_histogram) -# {} 1.5 - +# {} 1.414213562373095 +clear # Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket # with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket @@ -129,9 +134,10 @@ eval instant at 5m histogram_sum(incr_histogram) # eval instant at 5m histogram_fraction(1, 2, incr_histogram) # {} 0.6 +# See explanation for exponential interpolation above. # Unsupported by streaming engine. 
# eval instant at 5m histogram_quantile(0.5, incr_histogram) -# {} 1.5 +# {} 1.414213562373095 eval instant at 50m incr_histogram @@ -152,20 +158,22 @@ eval instant at 50m histogram_sum(incr_histogram) # eval instant at 50m histogram_fraction(1, 2, incr_histogram) # {} 0.8571428571428571 +# See explanation for exponential interpolation above. # Unsupported by streaming engine. # eval instant at 50m histogram_quantile(0.5, incr_histogram) -# {} 1.5 +# {} 1.414213562373095 # Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. -eval instant at 50m rate(incr_histogram[5m]) - {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} +eval instant at 50m rate(incr_histogram[10m]) + {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} # Calculate the 50th percentile of observations over the last 10m. +# See explanation for exponential interpolation above. # Unsupported by streaming engine. # eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) -# {} 1.5 - +# {} 1.414213562373095 +clear # Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.: # 0: 1 2 4 8 16 32 64 (higher resolution) @@ -193,86 +201,88 @@ eval instant at 5m histogram_sum(low_res_histogram) # eval instant at 5m histogram_fraction(1, 4, low_res_histogram) # {} 1 - +clear # z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range # 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket. load 5m single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}} -eval instant at 5m histogram_count(single_zero_histogram) +eval instant at 1m histogram_count(single_zero_histogram) {} 1 -eval instant at 5m histogram_sum(single_zero_histogram) +eval instant at 1m histogram_sum(single_zero_histogram) {} 0.25 # Unsupported by streaming engine. -# eval instant at 5m histogram_avg(single_zero_histogram) +# eval instant at 1m histogram_avg(single_zero_histogram) # {} 0.25 # When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally # distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the # entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5. # Unsupported by streaming engine. -# eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram) +# eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram) # {} 1 # Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5. # Unsupported by streaming engine. -# eval instant at 5m histogram_quantile(0.5, single_zero_histogram) +# eval instant at 1m histogram_quantile(0.5, single_zero_histogram) # {} 0 - +clear # Let's turn single_histogram upside-down. load 5m negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}} -eval instant at 5m histogram_count(negative_histogram) +eval instant at 1m histogram_count(negative_histogram) {} 4 -eval instant at 5m histogram_sum(negative_histogram) +eval instant at 1m histogram_sum(negative_histogram) {} -5 # Unsupported by streaming engine. 
-# eval instant at 5m histogram_avg(negative_histogram) +# eval instant at 1m histogram_avg(negative_histogram) # {} -1.25 # We expect half of the values to fall in the range -2 < x <= -1. # Unsupported by streaming engine. -# eval instant at 5m histogram_fraction(-2, -1, negative_histogram) +# eval instant at 1m histogram_fraction(-2, -1, negative_histogram) # {} 0.5 +# Exponential interpolation works the same as for positive buckets, just mirrored. # Unsupported by streaming engine. -# eval instant at 5m histogram_quantile(0.5, negative_histogram) -# {} -1.5 - +# eval instant at 1m histogram_quantile(0.5, negative_histogram) +# {} -1.414213562373095 +clear # Two histogram samples. load 5m two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}} # We expect to see the newest sample. -eval instant at 10m histogram_count(two_samples_histogram) +eval instant at 5m histogram_count(two_samples_histogram) {} 4 -eval instant at 10m histogram_sum(two_samples_histogram) +eval instant at 5m histogram_sum(two_samples_histogram) {} -4 # Unsupported by streaming engine. -# eval instant at 10m histogram_avg(two_samples_histogram) +# eval instant at 5m histogram_avg(two_samples_histogram) # {} -1 # Unsupported by streaming engine. -# eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram) +# eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram) # {} 0.5 +# See explanation for exponential interpolation above. # Unsupported by streaming engine. -# eval instant at 10m histogram_quantile(0.5, two_samples_histogram) -# {} -1.5 - +# eval instant at 5m histogram_quantile(0.5, two_samples_histogram) +# {} -1.414213562373095 +clear # Add two histograms with negated data. load 5m @@ -298,6 +308,8 @@ eval instant at 5m histogram_sum(balanced_histogram) # eval instant at 5m histogram_quantile(0.5, balanced_histogram) # {} 0.5 +clear + # Add histogram to test sum(last_over_time) regression load 5m incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10 @@ -309,6 +321,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram)) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) {} 30 +clear + # Apply rate function to histogram. load 15s histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100 @@ -319,6 +333,8 @@ eval instant at 5m rate(histogram_rate[45s]) eval range from 5m to 5m30s step 30s rate(histogram_rate[45s]) {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1 +clear + # Apply count and sum function to histogram. load 10m histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -329,6 +345,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2) eval instant at 10m histogram_sum(histogram_count_sum_2) {} 100 +clear + # Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res). 
load 10m histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1 @@ -341,6 +359,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1) # {} 1.163807968526718 +clear + # Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res). load 10m histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1 @@ -353,6 +373,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2) # {} 2.3971123370139447e-05 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}. load 10m histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -365,6 +387,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3) # {} 1844.4651144196398 +clear + # Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}. load 10m histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1 @@ -377,6 +401,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4) # {} 759352122.1939945 +clear + # Apply stddev and stdvar function to histogram with {-10x10}. load 10m histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1 @@ -389,6 +415,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5) # {} 1.725830020304794 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}. load 10m histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -401,6 +429,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6) # {} NaN +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}. load 10m histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -413,6 +443,8 @@ load 10m # eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7) # {} Inf +clear + # Apply quantile function to histogram with all positive buckets with zero bucket. load 10m histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -425,25 +457,29 @@ load 10m # eval instant at 10m histogram_quantile(1, histogram_quantile_1) # {} 16 +# The following quantiles are within a bucket. Exponential +# interpolation is applied (rather than linear, as it is done for +# classic histograms), leading to slightly different quantile values. # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) -# {} 15.759999999999998 +# {} 15.67072476139083 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) -# {} 13.600000000000001 +# {} 12.99603834169977 # Unsupported by streaming engine. 
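+# For example, the 0.6 quantile needs 0.6 * 12 = 7.2 observations; 7 of them lie at or
+# below 4, so 0.2 of the single observation in the (4, 8] bucket is needed. Exponential
+# interpolation gives 4 * (8/4)^0.2 = 2^2.2 ≈ 4.5948, where linear interpolation would give
+# 4 + 0.2 * (8 - 4) = 4.8.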
# eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) -# {} 4.799999999999997 +# {} 4.594793419988138 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) -# {} 1.6666666666666665 +# {} 1.5874010519681994 +# Linear interpolation within the zero bucket after all. # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) -# {} 0.0006000000000000001 +# {} 0.0006 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0, histogram_quantile_1) @@ -453,6 +489,8 @@ load 10m # eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1) # {} -Inf +clear + # Apply quantile function to histogram with all negative buckets with zero bucket. load 10m histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 @@ -465,21 +503,24 @@ load 10m # eval instant at 10m histogram_quantile(1, histogram_quantile_2) # {} 0 +# Again, the quantile values here are slightly different from what +# they would be with linear interpolation. Note that quantiles +# ending up in the zero bucket are linearly interpolated after all. # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) -# {} -6.000000000000048e-05 +# {} -0.00006 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) -# {} -0.0005999999999999996 +# {} -0.0006 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) -# {} -1.6666666666666667 +# {} -1.5874010519681996 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) -# {} -13.6 +# {} -12.996038341699768 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0, histogram_quantile_2) @@ -489,7 +530,11 @@ load 10m # eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) # {} -Inf -# Apply quantile function to histogram with both positive and negative buckets with zero bucket. +clear + +# Apply quantile function to histogram with both positive and negative +# buckets with zero bucket. +# First positive buckets with exponential interpolation. load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -503,19 +548,21 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) -# {} 15.519999999999996 +# {} 15.34822590920423 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) -# {} 11.200000000000003 +# {} 10.556063286183155 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) -# {} 1.2666666666666657 +# {} 1.2030250360821164 +# Linear interpolation in the zero bucket, symmetrically centered around +# the zero point. # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) -# {} 0.0006000000000000005 +# {} 0.0006 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) @@ -523,19 +570,20 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) -# {} -0.0005999999999999996 +# {} -0.0006 +# Finally negative buckets with mirrored exponential interpolation. # Unsupported by streaming engine. 
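+# For example, the 0.3 quantile needs 0.3 * 24 = 7.2 observations counted from the most
+# negative end; 5 of them lie below -2, so 2.2 of the 3 observations in the bucket between
+# -2 and -1 are needed. Mirrored exponential interpolation gives -2 * (1/2)^(2.2/3) ≈ -1.2030.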
# eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) -# {} -1.266666666666667 +# {} -1.2030250360821169 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) -# {} -11.2 +# {} -10.556063286183155 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) -# {} -15.52 +# {} -15.34822590920423 # Unsupported by streaming engine. # eval instant at 10m histogram_quantile(0, histogram_quantile_3) @@ -545,6 +593,108 @@ load 10m # eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) # {} -Inf +clear + +# Try different schemas. (The interpolation logic must not depend on the schema.) +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}} + +# Unsupported by streaming engine. +# eval instant at 1m histogram_quantile(0.5, var_res_histogram) +# {schema="-1"} 2.0 +# {schema="0"} 1.4142135623730951 +# {schema="+1"} 1.189207 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"}) +# {schema="-1"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"}) +# {schema="0"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"}) +# {schema="+1"} 0.5 + +# The same as above, but one bucket "further to the right". +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}} + +# Unsupported by streaming engine. +# eval instant at 1m histogram_quantile(0.5, var_res_histogram) +# {schema="-1"} 8.0 +# {schema="0"} 2.82842712474619 +# {schema="+1"} 1.6817928305074292 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"}) +# {schema="-1"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"}) +# {schema="0"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"}) +# {schema="+1"} 0.5 + +# And everything again but for negative buckets. +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}} + +# Unsupported by streaming engine. +# eval instant at 1m histogram_quantile(0.5, var_res_histogram) +# {schema="-1"} -2.0 +# {schema="0"} -1.4142135623730951 +# {schema="+1"} -1.189207 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"}) +# {schema="-1"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"}) +# {schema="0"} 0.5 + +# Unsupported by streaming engine. 
+# eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"}) +# {schema="+1"} 0.5 + +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}} + +# Unsupported by streaming engine. +# eval instant at 1m histogram_quantile(0.5, var_res_histogram) +# {schema="-1"} -8.0 +# {schema="0"} -2.82842712474619 +# {schema="+1"} -1.6817928305074292 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"}) +# {schema="-1"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"}) +# {schema="0"} 0.5 + +# Unsupported by streaming engine. +# eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"}) +# {schema="+1"} 0.5 + + # Apply fraction function to empty histogram. load 10m histogram_fraction_1 {{}}x1 @@ -553,6 +703,8 @@ load 10m # eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1) # {} NaN +clear + # Apply fraction function to histogram with positive and zero buckets. load 10m histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -573,13 +725,21 @@ load 10m # eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) # {} 0.16666666666666666 +# Note that this result and the one above add up to 1. +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) +# {} 0.8333333333333334 + +# We are in the zero bucket, resulting in linear interpolation # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) # {} 0.08333333333333333 +# Demonstrate that the inverse operation with histogram_quantile yields +# the original value with the non-trivial result above. # Unsupported by streaming engine. -# eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) -# {} 0.8333333333333334 +# eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2) +# {} 0.0005 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) @@ -589,21 +749,37 @@ load 10m # eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) # {} 0.25 +# More non-trivial results with interpolation involved below, including +# some round-trips via histogram_quantile to prove that the inverse +# operation leads to the same results. + +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2) +# {} 0.4795739585136224 + # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) -# {} 0.125 +# {} 0.10375937481971091 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) # {} 0.3333333333333333 +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2) +# {} 0.6320802083934297 + +# Unsupported by streaming engine. +# eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2) +# {} 6 + # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) -# {} 0.2916666666666667 +# {} 0.29874687506009634 # Unsupported by streaming engine. 
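+# For example, for the range 1.5 to 6 below: exponential interpolation assigns
+# (1 - log2(1.5)) ≈ 0.41504 of the 3 observations in the (1, 2] bucket and
+# log2(6/4) ≈ 0.58496 of the single observation in the (4, 8] bucket to that range,
+# i.e. (0.41504 * 3 + 0.58496) / 12 ≈ 0.15251.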
# eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) -# {} 0.16666666666666666 +# {} 0.15250624987980724 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) @@ -685,6 +861,14 @@ load 10m # eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) # {} 0.08333333333333333 +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3) +# {} 0.9166666666666666 + +# Unsupported by streaming engine. +# eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3) +# {} -0.0005 + # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) # {} 0 @@ -719,19 +903,27 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) -# {} 0.125 +# {} 0.10375937481971091 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) # {} 0.3333333333333333 +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3) +# {} 0.36791979160657035 + +# Unsupported by streaming engine. +# eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3) +# {} -6 + # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) -# {} 0.2916666666666667 +# {} 0.29874687506009634 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) -# {} 0.16666666666666666 +# {} 0.15250624987980724 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3) @@ -769,6 +961,8 @@ load 10m # eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3) # {} 1 +clear + # Apply fraction function to histogram with both positive, negative and zero buckets. load 10m histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -793,6 +987,22 @@ load 10m # eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) # {} 0.08333333333333333 +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4) +# {} 0.5416666666666666 + +# Unsupported by streaming engine. +# eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4) +# {} 0.0005 + +# Unsupported by streaming engine. +# eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4) +# {} 0.4583333333333333 + +# Unsupported by streaming engine. +# eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4) +# {} -0.0005 + # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) # {} 0.4166666666666667 @@ -807,7 +1017,7 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) -# {} 0.0625 +# {} 0.051879687409855414 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) @@ -815,11 +1025,11 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) -# {} 0.14583333333333334 +# {} 0.14937343753004825 # Unsupported by streaming engine. 
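+# Same interpolated share of observations as for histogram_fraction_2 above, but out of a
+# total count of 24 rather than 12, so the fraction halves to ≈ 0.07625.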
# eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) -# {} 0.08333333333333333 +# {} 0.07625312493990366 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) @@ -827,7 +1037,7 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) -# {} 0.0625 +# {} 0.051879687409855456 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) @@ -835,11 +1045,11 @@ load 10m # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) -# {} 0.14583333333333334 +# {} 0.14937343753004817 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) -# {} 0.08333333333333333 +# {} 0.07625312493990362 # Unsupported by streaming engine. # eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) @@ -929,18 +1139,40 @@ eval instant at 10m histogram_mul_div*float_series_0 eval instant at 10m float_series_0*histogram_mul_div {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} -# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. See: https://github.com/prometheus/prometheus/issues/13934 eval instant at 10m histogram_mul_div/0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div/float_series_0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div*0/0 - {} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}} + {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}} + +eval_info instant at 10m histogram_mul_div*histogram_mul_div + +eval_info instant at 10m histogram_mul_div/histogram_mul_div + +eval_info instant at 10m float_series_3/histogram_mul_div + +eval_info instant at 10m 0/histogram_mul_div clear +# Apply binary operators to mixed histogram and float samples. +# TODO:(NeerajGartia21) move these tests to their respective locations when tests from engine_test.go are be moved here. + +load 10m + histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + float_sample 0x1 + +eval_info instant at 10m float_sample+histogram_sample + +eval_info instant at 10m histogram_sample+float_sample + +eval_info instant at 10m float_sample-histogram_sample + +eval_info instant at 10m histogram_sample-float_sample + # Counter reset only noticeable in a single bucket. 
load 5m reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}} @@ -979,7 +1211,7 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate @@ -1001,11 +1233,11 @@ eval_warn instant at 1m30s rate(some_metric[1m]) # Should produce no results. # Start with custom, end with exponential. -eval_warn instant at 1m rate(some_metric[30s]) +eval_warn instant at 1m rate(some_metric[1m]) # Should produce no results. # Start with exponential, end with custom. -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) # Should produce no results. clear @@ -1146,8 +1378,10 @@ clear load 1m histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}} -eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} -eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} + +clear diff --git a/pkg/streamingpromql/testdata/upstream/operators.test b/pkg/streamingpromql/testdata/upstream/operators.test index 399f2496231..763c7d6c3de 100644 --- a/pkg/streamingpromql/testdata/upstream/operators.test +++ b/pkg/streamingpromql/testdata/upstream/operators.test @@ -12,6 +12,7 @@ load 5m http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}x11 load 5m vector_matching_a{l="x"} 0+1x100 @@ -119,7 +120,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"} http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60 +eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 @@ -293,6 +294,26 @@ eval instant at 50m 1 == bool 1 eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100 {job="api-server", 
instance="0", group="production"} 1 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 5m {job="app-server"} == 80 + http_requests{group="canary", instance="1", job="app-server"} 80 + +eval_info instant at 5m http_requests_histogram != 80 + +eval_info instant at 5m http_requests_histogram > 80 + +eval_info instant at 5m http_requests_histogram < 80 + +eval_info instant at 5m http_requests_histogram >= 80 + +eval_info instant at 5m http_requests_histogram <= 80 + +# Should produce valid results in case of (in)equality between two histograms. +eval instant at 5m http_requests_histogram == http_requests_histogram + http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +eval instant at 5m http_requests_histogram != http_requests_histogram + # group_left/group_right. clear @@ -315,78 +336,78 @@ load 5m # Copy machine role to node variable. # Unsupported by streaming engine. -# eval instant at 5m node_role * on (instance) group_right (role) node_var +# eval instant at 1m node_role * on (instance) group_right (role) node_var # {instance="abc",job="node",role="prometheus"} 2 # Unsupported by streaming engine. -# eval instant at 5m node_var * on (instance) group_left (role) node_role +# eval instant at 1m node_var * on (instance) group_left (role) node_role # {instance="abc",job="node",role="prometheus"} 2 # Unsupported by streaming engine. -# eval instant at 5m node_var * ignoring (role) group_left (role) node_role +# eval instant at 1m node_var * ignoring (role) group_left (role) node_role # {instance="abc",job="node",role="prometheus"} 2 # Unsupported by streaming engine. -# eval instant at 5m node_role * ignoring (role) group_right (role) node_var +# eval instant at 1m node_role * ignoring (role) group_right (role) node_var # {instance="abc",job="node",role="prometheus"} 2 # Copy machine role to node variable with instrumentation labels. # Unsupported by streaming engine. -# eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role +# eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role # {instance="abc",job="node",mode="idle",role="prometheus"} 3 # {instance="abc",job="node",mode="user",role="prometheus"} 1 # Unsupported by streaming engine. -# eval instant at 5m node_cpu * on (instance) group_left (role) node_role +# eval instant at 1m node_cpu * on (instance) group_left (role) node_role # {instance="abc",job="node",mode="idle",role="prometheus"} 3 # {instance="abc",job="node",mode="user",role="prometheus"} 1 # Ratio of total. # Unsupported by streaming engine. -# eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) +# eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) # {instance="abc",job="node",mode="idle"} .75 # {instance="abc",job="node",mode="user"} .25 # {instance="def",job="node",mode="idle"} .80 # {instance="def",job="node",mode="user"} .20 # Unsupported by streaming engine. -# eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) +# eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) # {job="node",mode="idle"} 0.7857142857142857 # {job="node",mode="user"} 0.21428571428571427 # Unsupported by streaming engine. 
-# eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) +# eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) # {} 1.0 # Unsupported by streaming engine. -# eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) +# eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) # {instance="abc",job="node",mode="idle"} .75 # {instance="abc",job="node",mode="user"} .25 # {instance="def",job="node",mode="idle"} .80 # {instance="def",job="node",mode="user"} .20 # Unsupported by streaming engine. -# eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) +# eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) # {instance="abc",job="node",mode="idle"} .75 # {instance="abc",job="node",mode="user"} .25 # {instance="def",job="node",mode="idle"} .80 # {instance="def",job="node",mode="user"} .20 # Unsupported by streaming engine. -# eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) +# eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) # {job="node",mode="idle"} 0.7857142857142857 # {job="node",mode="user"} 0.21428571428571427 # Unsupported by streaming engine. -# eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) +# eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) # {} 1.0 # Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here). # Unsupported by streaming engine. -# eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 +# eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0 # {instance="abc",job="node",mode="idle",foo="bar"} 3 # {instance="abc",job="node",mode="user",foo="bar"} 1 # {instance="def",job="node",mode="idle",foo="bar"} 8 @@ -395,13 +416,13 @@ load 5m # Use threshold from metric, and copy over target. # Unsupported by streaming engine. -# eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold +# eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold # node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 # node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 # Use threshold from metric, and a default (1) if it's not present. # Unsupported by streaming engine. -# eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) +# eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) # node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 # node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 # node_cpu{instance="def",job="node",mode="idle"} 8 @@ -409,37 +430,37 @@ load 5m # Check that binops drop the metric name. 
-eval instant at 5m node_cpu + 2 +eval instant at 1m node_cpu + 2 {instance="abc",job="node",mode="idle"} 5 {instance="abc",job="node",mode="user"} 3 {instance="def",job="node",mode="idle"} 10 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu - 2 +eval instant at 1m node_cpu - 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} -1 {instance="def",job="node",mode="idle"} 6 {instance="def",job="node",mode="user"} 0 -eval instant at 5m node_cpu / 2 +eval instant at 1m node_cpu / 2 {instance="abc",job="node",mode="idle"} 1.5 {instance="abc",job="node",mode="user"} 0.5 {instance="def",job="node",mode="idle"} 4 {instance="def",job="node",mode="user"} 1 -eval instant at 5m node_cpu * 2 +eval instant at 1m node_cpu * 2 {instance="abc",job="node",mode="idle"} 6 {instance="abc",job="node",mode="user"} 2 {instance="def",job="node",mode="idle"} 16 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu ^ 2 +eval instant at 1m node_cpu ^ 2 {instance="abc",job="node",mode="idle"} 9 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 64 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu % 2 +eval instant at 1m node_cpu % 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 0 @@ -454,14 +475,14 @@ load 5m metricB{baz="meh"} 4 # On with no labels, for metrics with no common labels. -eval instant at 5m random + on() metricA +eval instant at 1m random + on() metricA {} 5 # Ignoring with no labels is the same as no ignoring. -eval instant at 5m metricA + ignoring() metricB +eval instant at 1m metricA + ignoring() metricB {baz="meh"} 7 -eval instant at 5m metricA + metricB +eval instant at 1m metricA + metricB {baz="meh"} 7 clear @@ -479,16 +500,16 @@ load 5m test_total{instance="localhost"} 50 test_smaller{instance="localhost"} 10 -eval instant at 5m test_total > bool test_smaller +eval instant at 1m test_total > bool test_smaller {instance="localhost"} 1 -eval instant at 5m test_total > test_smaller +eval instant at 1m test_total > test_smaller test_total{instance="localhost"} 50 -eval instant at 5m test_total < bool test_smaller +eval instant at 1m test_total < bool test_smaller {instance="localhost"} 0 -eval instant at 5m test_total < test_smaller +eval instant at 1m test_total < test_smaller clear @@ -498,14 +519,313 @@ load 5m trigx{} 20 trigNaN{} NaN -eval instant at 5m trigy atan2 trigx +eval instant at 1m trigy atan2 trigx {} 0.4636476090008061 -eval instant at 5m trigy atan2 trigNaN +eval instant at 1m trigy atan2 trigNaN {} NaN -eval instant at 5m 10 atan2 20 +eval instant at 1m 10 atan2 20 0.4636476090008061 -eval instant at 5m 10 atan2 NaN +eval instant at 1m 10 atan2 NaN NaN + +clear + +# Test comparison operations with floats and histograms. 
+load 6m + left_floats 1 2 _ _ 3 stale 4 5 NaN Inf -Inf + right_floats 4 _ _ 5 3 7 -1 20 NaN Inf -Inf + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} + right_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ + right_floats_for_histograms 0 -1 2 3 4 + +eval range from 0 to 60m step 6m left_floats == right_floats + left_floats _ _ _ _ 3 _ _ _ _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats == bool right_floats + {} 0 _ _ _ 1 _ 0 0 0 1 1 + +eval range from 0 to 60m step 6m left_floats == does_not_match + # No results. + +eval range from 0 to 24m step 6m left_histograms == right_histograms + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _ + +eval range from 0 to 24m step 6m left_histograms == bool right_histograms + {} 1 0 _ _ _ + +eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats != right_floats + left_floats 1 _ _ _ _ _ 4 5 NaN _ _ + +eval range from 0 to 60m step 6m left_floats != bool right_floats + {} 1 _ _ _ 0 _ 1 1 1 0 0 + +eval range from 0 to 24m step 6m left_histograms != right_histograms + left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _ + +eval range from 0 to 24m step 6m left_histograms != bool right_histograms + {} 0 1 _ _ _ + +eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats > right_floats + left_floats _ _ _ _ _ _ 4 _ _ _ _ + +eval range from 0 to 60m step 6m left_floats > bool right_floats + {} 0 _ _ _ 0 _ 1 0 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms > right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats >= right_floats + left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats >= bool right_floats + {} 0 _ _ _ 1 _ 1 0 0 1 1 + +eval_info range from 0 to 24m step 6m left_histograms >= right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats < right_floats + left_floats 1 _ _ _ _ _ _ 5 _ _ _ + +eval range from 0 to 60m step 6m left_floats < bool right_floats + {} 1 _ _ _ 0 _ 0 1 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms < right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms + # No results. 
+ +eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats <= right_floats + left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats <= bool right_floats + {} 1 _ _ _ 1 _ 0 1 0 1 1 + +eval_info range from 0 to 24m step 6m left_histograms <= right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms + # No results. + +# Vector / scalar combinations with scalar on right side +eval range from 0 to 60m step 6m left_floats == 3 + left_floats _ _ _ _ 3 _ _ _ _ _ _ + +eval range from 0 to 60m step 6m left_floats != 3 + left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf + +eval range from 0 to 60m step 6m left_floats > 3 + left_floats _ _ _ _ _ _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m left_floats >= 3 + left_floats _ _ _ _ 3 _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m left_floats < 3 + left_floats 1 2 _ _ _ _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m left_floats <= 3 + left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m left_floats == bool 3 + {} 0 0 _ _ 1 _ 0 0 0 0 0 + +eval range from 0 to 60m step 6m left_floats == Inf + left_floats _ _ _ _ _ _ _ _ _ Inf _ + +eval range from 0 to 60m step 6m left_floats == bool Inf + {} 0 0 _ _ 0 _ 0 0 0 1 0 + +eval range from 0 to 60m step 6m left_floats == NaN + # No results. + +eval range from 0 to 60m step 6m left_floats == bool NaN + {} 0 0 _ _ 0 _ 0 0 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms == 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != 3 + # No results. + +eval range from 0 to 24m step 6m left_histograms != 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > 0 + # No results. + +eval range from 0 to 24m step 6m left_histograms >= 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= 3 + # No results. + +eval range from 0 to 24m step 6m left_histograms <= 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool 0 + # No results. 
+ +eval_info range from 0 to 24m step 6m left_histograms <= bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool 0 + # No results. + +# Vector / scalar combinations with scalar on left side +eval range from 0 to 60m step 6m 3 == left_floats + left_floats _ _ _ _ 3 _ _ _ _ _ _ + +eval range from 0 to 60m step 6m 3 != left_floats + left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf + +eval range from 0 to 60m step 6m 3 < left_floats + left_floats _ _ _ _ _ _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m 3 <= left_floats + left_floats _ _ _ _ 3 _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m 3 > left_floats + left_floats 1 2 _ _ _ _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m 3 >= left_floats + left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m 3 == bool left_floats + {} 0 0 _ _ 1 _ 0 0 0 0 0 + +eval range from 0 to 60m step 6m Inf == left_floats + left_floats _ _ _ _ _ _ _ _ _ Inf _ + +eval range from 0 to 60m step 6m Inf == bool left_floats + {} 0 0 _ _ 0 _ 0 0 0 1 0 + +eval range from 0 to 60m step 6m NaN == left_floats + # No results. + +eval range from 0 to 60m step 6m NaN == bool left_floats + {} 0 0 _ _ 0 _ 0 0 0 0 0 + +eval range from 0 to 24m step 6m 3 == left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 == left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 != left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 != left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 > left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 > left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 >= left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 >= left_histograms + # No results. 
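+
+# In summary: only == and != are defined between two native histograms; ordering comparisons
+# between histograms, and any comparison between a histogram and a float or scalar, drop the
+# histogram sample, which is why the histogram cases above return no results (most of them
+# with an info annotation).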
+ +clear diff --git a/pkg/streamingpromql/testdata/upstream/range_queries.test b/pkg/streamingpromql/testdata/upstream/range_queries.test index 9770ee8f6fa..33c17e1350e 100644 --- a/pkg/streamingpromql/testdata/upstream/range_queries.test +++ b/pkg/streamingpromql/testdata/upstream/range_queries.test @@ -4,20 +4,20 @@ # Provenance-includes-copyright: The Prometheus Authors # sum_over_time with all values -load 30s +load 15s bar 0 1 10 100 1000 -eval range from 0 to 2m step 1m sum_over_time(bar[30s]) +eval range from 0 to 1m step 30s sum_over_time(bar[30s]) {} 0 11 1100 clear # sum_over_time with trailing values -load 30s +load 15s bar 0 1 10 100 1000 0 0 0 0 eval range from 0 to 2m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 + {} 0 1100 0 clear @@ -26,15 +26,15 @@ load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000 eval range from 0 to 4m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 110000 11000000 + {} 0 10 1000 100000 10000000 clear # sum_over_time with all values random -load 30s +load 15s bar 5 17 42 2 7 905 51 -eval range from 0 to 3m step 1m sum_over_time(bar[30s]) +eval range from 0 to 90s step 30s sum_over_time(bar[30s]) {} 5 59 9 956 clear diff --git a/pkg/streamingpromql/testdata/upstream/staleness.test b/pkg/streamingpromql/testdata/upstream/staleness.test index 76224047a3c..336bc331491 100644 --- a/pkg/streamingpromql/testdata/upstream/staleness.test +++ b/pkg/streamingpromql/testdata/upstream/staleness.test @@ -19,10 +19,10 @@ eval instant at 40s metric {__name__="metric"} 2 # It goes stale 5 minutes after the last sample. -eval instant at 330s metric +eval instant at 329s metric {__name__="metric"} 2 -eval instant at 331s metric +eval instant at 330s metric # Range vector ignores stale sample. @@ -35,9 +35,13 @@ eval instant at 10s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[10s]) + +eval instant at 20s count_over_time(metric[20s]) {} 1 eval instant at 20s count_over_time(metric[10]) + +eval instant at 20s count_over_time(metric[20]) {} 1 @@ -53,7 +57,7 @@ eval instant at 0s metric eval instant at 150s metric {__name__="metric"} 0 -eval instant at 300s metric +eval instant at 299s metric {__name__="metric"} 0 -eval instant at 301s metric +eval instant at 300s metric diff --git a/pkg/streamingpromql/testdata/upstream/subquery.test b/pkg/streamingpromql/testdata/upstream/subquery.test index 4056278afef..1849a4576bb 100644 --- a/pkg/streamingpromql/testdata/upstream/subquery.test +++ b/pkg/streamingpromql/testdata/upstream/subquery.test @@ -15,18 +15,18 @@ eval instant at 10s sum_over_time(metric[50s:5s]) # Every evaluation yields the last value, i.e. 2 eval instant at 5m sum_over_time(metric[50s:10s]) - {} 12 + {} 10 # Series becomes stale at 5m10s (5m after last sample) -# Hence subquery gets a single sample at 6m-50s=5m10s. -eval instant at 6m sum_over_time(metric[50s:10s]) +# Hence subquery gets a single sample at 5m10s. +eval instant at 5m59s sum_over_time(metric[60s:10s]) {} 2 eval instant at 10s rate(metric[20s:10s]) {} 0.1 eval instant at 20s rate(metric[20s:5s]) - {} 0.05 + {} 0.06666666666666667 clear @@ -54,16 +54,16 @@ load 10s metric3 0+3x1000 eval instant at 1000s sum_over_time(metric1[30s:10s]) - {} 394 + {} 297 -# This is (394*2 - 100), because other than the last 100 at 1000s, +# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. 
eval instant at 1000s sum_over_time(metric1[30s:5s]) - {} 688 + {} 591 -# Offset is aligned with the step. +# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) - {} 394 + {} 297 # Same result for different offsets due to step alignment. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) @@ -98,16 +98,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) # Nested subqueries eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) - {} 0.4 + {} 0.30000000000000004 eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) - {} 0.8 + {} 0.6000000000000001 eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) - {} 1.2 + {} 0.9 eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) - {} 2.4 + {} 1.8 clear @@ -120,16 +120,20 @@ load 7s eval instant at 80s rate(metric[1m]) {} 2.517857143 -# No extrapolation, [2@20, 144@80]: (144 - 2) / 60 -eval instant at 80s rate(metric[1m:10s]) - {} 2.366666667 +# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) +eval instant at 80s rate(metric[1m500ms:10s]) + {} 2.3666666666666667 + +# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 +eval instant at 80s rate(metric[1m1s:10s]) + {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 eval instant at 20s min_over_time(metric[10s]) {} 2 -# min(1@10, 2@20) -eval instant at 20s min_over_time(metric[10s:10s]) +# min(2@20) +eval instant at 20s min_over_time(metric[15s:10s]) {} 1 eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) diff --git a/pkg/streamingpromql/testdata/upstream/trig_functions.test b/pkg/streamingpromql/testdata/upstream/trig_functions.test index fb657867b56..7192da1aecb 100644 --- a/pkg/streamingpromql/testdata/upstream/trig_functions.test +++ b/pkg/streamingpromql/testdata/upstream/trig_functions.test @@ -10,92 +10,92 @@ load 5m trig{l="y"} 20 trig{l="NaN"} NaN -eval instant at 5m sin(trig) +eval instant at 1m sin(trig) {l="x"} -0.5440211108893699 {l="y"} 0.9129452507276277 {l="NaN"} NaN -eval instant at 5m cos(trig) +eval instant at 1m cos(trig) {l="x"} -0.8390715290764524 {l="y"} 0.40808206181339196 {l="NaN"} NaN -eval instant at 5m tan(trig) +eval instant at 1m tan(trig) {l="x"} 0.6483608274590867 {l="y"} 2.2371609442247427 {l="NaN"} NaN -eval instant at 5m asin(trig - 10.1) +eval instant at 1m asin(trig - 10.1) {l="x"} -0.10016742116155944 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m acos(trig - 10.1) +eval instant at 1m acos(trig - 10.1) {l="x"} 1.670963747956456 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m atan(trig) +eval instant at 1m atan(trig) {l="x"} 1.4711276743037345 {l="y"} 1.5208379310729538 {l="NaN"} NaN -eval instant at 5m sinh(trig) +eval instant at 1m sinh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m cosh(trig) +eval instant at 1m cosh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m tanh(trig) +eval instant at 1m tanh(trig) {l="x"} 0.9999999958776927 {l="y"} 1 {l="NaN"} NaN -eval instant at 5m asinh(trig) +eval instant at 1m asinh(trig) {l="x"} 2.99822295029797 {l="y"} 3.6895038689889055 {l="NaN"} NaN -eval instant at 5m acosh(trig) +eval instant at 1m acosh(trig) {l="x"} 2.993222846126381 {l="y"} 3.6882538673612966 {l="NaN"} NaN -eval instant at 5m atanh(trig - 10.1) +eval instant at 1m atanh(trig - 10.1) {l="x"} 
-0.10033534773107522 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m rad(trig) +eval instant at 1m rad(trig) {l="x"} 0.17453292519943295 {l="y"} 0.3490658503988659 {l="NaN"} NaN -eval instant at 5m rad(trig - 10) +eval instant at 1m rad(trig - 10) {l="x"} 0 {l="y"} 0.17453292519943295 {l="NaN"} NaN -eval instant at 5m rad(trig - 20) +eval instant at 1m rad(trig - 20) {l="x"} -0.17453292519943295 {l="y"} 0 {l="NaN"} NaN -eval instant at 5m deg(trig) +eval instant at 1m deg(trig) {l="x"} 572.9577951308232 {l="y"} 1145.9155902616465 {l="NaN"} NaN -eval instant at 5m deg(trig - 10) +eval instant at 1m deg(trig - 10) {l="x"} 0 {l="y"} 572.9577951308232 {l="NaN"} NaN -eval instant at 5m deg(trig - 20) +eval instant at 1m deg(trig - 20) {l="x"} -572.9577951308232 {l="y"} 0 {l="NaN"} NaN diff --git a/pkg/streamingpromql/testutils/utils.go b/pkg/streamingpromql/testutils/utils.go index f4c7dccb052..44be683a835 100644 --- a/pkg/streamingpromql/testutils/utils.go +++ b/pkg/streamingpromql/testutils/utils.go @@ -60,6 +60,8 @@ func RequireEqualResults(t testing.TB, expr string, expected, actual *promql.Res actualSeries := actualMatrix[i] require.Equal(t, expectedSeries.Metric, actualSeries.Metric) + require.Lenf(t, actualSeries.Floats, len(expectedSeries.Floats), "expected result %v for series %v", expectedSeries.Floats, expectedSeries.Metric) + require.Lenf(t, actualSeries.Histograms, len(expectedSeries.Histograms), "expected result %v for series %v", expectedSeries.Histograms, expectedSeries.Metric) for j, expectedPoint := range expectedSeries.Floats { actualPoint := actualSeries.Floats[j] diff --git a/pkg/streamingpromql/types/data.go b/pkg/streamingpromql/types/data.go index e8a95d5134b..72ba35553d4 100644 --- a/pkg/streamingpromql/types/data.go +++ b/pkg/streamingpromql/types/data.go @@ -98,7 +98,8 @@ type RangeVectorStepData struct { StepT int64 // RangeStart is the beginning of the time range selected by this time step. - // RangeStart is inclusive (ie. points with timestamp >= RangeStart are included in the range). + // RangeStart is exclusive (ie. points with timestamp > RangeStart are included in the range, + // and the point with timestamp == RangeStart is excluded). RangeStart int64 // RangeEnd is the end of the time range selected by this time step. diff --git a/pkg/streamingpromql/types/fpoint_ring_buffer.go b/pkg/streamingpromql/types/fpoint_ring_buffer.go index b6e3993cd1d..e18964304e3 100644 --- a/pkg/streamingpromql/types/fpoint_ring_buffer.go +++ b/pkg/streamingpromql/types/fpoint_ring_buffer.go @@ -29,9 +29,9 @@ func NewFPointRingBuffer(memoryConsumptionTracker *limiting.MemoryConsumptionTra return &FPointRingBuffer{memoryConsumptionTracker: memoryConsumptionTracker} } -// DiscardPointsBefore discards all points in this buffer with timestamp less than t. -func (b *FPointRingBuffer) DiscardPointsBefore(t int64) { - for b.size > 0 && b.points[b.firstIndex].T < t { +// DiscardPointsAtOrBefore discards all points in this buffer with timestamp less than or equal to t. 
+func (b *FPointRingBuffer) DiscardPointsAtOrBefore(t int64) { + for b.size > 0 && b.points[b.firstIndex].T <= t { b.firstIndex++ b.size-- diff --git a/pkg/streamingpromql/types/hpoint_ring_buffer.go b/pkg/streamingpromql/types/hpoint_ring_buffer.go index 045a70115f8..de95f6e2f97 100644 --- a/pkg/streamingpromql/types/hpoint_ring_buffer.go +++ b/pkg/streamingpromql/types/hpoint_ring_buffer.go @@ -29,9 +29,9 @@ func NewHPointRingBuffer(memoryConsumptionTracker *limiting.MemoryConsumptionTra return &HPointRingBuffer{memoryConsumptionTracker: memoryConsumptionTracker} } -// DiscardPointsBefore discards all points in this buffer with timestamp less than t. -func (b *HPointRingBuffer) DiscardPointsBefore(t int64) { - for b.size > 0 && b.points[b.firstIndex].T < t { +// DiscardPointsAtOrBefore discards all points in this buffer with timestamp less than or equal to t. +func (b *HPointRingBuffer) DiscardPointsAtOrBefore(t int64) { + for b.size > 0 && b.points[b.firstIndex].T <= t { b.firstIndex++ b.size-- diff --git a/pkg/streamingpromql/types/ring_buffer_test.go b/pkg/streamingpromql/types/ring_buffer_test.go index cc1bc050f9c..ed6b2d2b327 100644 --- a/pkg/streamingpromql/types/ring_buffer_test.go +++ b/pkg/streamingpromql/types/ring_buffer_test.go @@ -16,7 +16,7 @@ import ( // We want to ensure FPoint+HPoint ring buffers are tested consistently, // and we don't care about performance here so we can use an interface+generics. type ringBuffer[T any] interface { - DiscardPointsBefore(t int64) + DiscardPointsAtOrBefore(t int64) Append(p T) error Reset() Use(s []T) @@ -77,7 +77,7 @@ func TestRingBuffer(t *testing.T) { func testRingBuffer[T any](t *testing.T, buf ringBuffer[T], points []T) { shouldHaveNoPoints(t, buf) - buf.DiscardPointsBefore(1) // Should handle empty buffer. + buf.DiscardPointsAtOrBefore(0) // Should handle empty buffer. shouldHaveNoPoints(t, buf) require.NoError(t, buf.Append(points[0])) @@ -86,16 +86,16 @@ func testRingBuffer[T any](t *testing.T, buf ringBuffer[T], points []T) { require.NoError(t, buf.Append(points[1])) shouldHavePoints(t, buf, points[:2]...) - buf.DiscardPointsBefore(1) + buf.DiscardPointsAtOrBefore(0) shouldHavePoints(t, buf, points[:2]...) // No change. - buf.DiscardPointsBefore(2) + buf.DiscardPointsAtOrBefore(1) shouldHavePoints(t, buf, points[1:2]...) require.NoError(t, buf.Append(points[2])) shouldHavePoints(t, buf, points[1:3]...) - buf.DiscardPointsBefore(4) + buf.DiscardPointsAtOrBefore(3) shouldHaveNoPoints(t, buf) require.NoError(t, buf.Append(points[3])) @@ -123,7 +123,7 @@ func testRingBuffer[T any](t *testing.T, buf ringBuffer[T], points []T) { buf.Use(pointsWithPowerOfTwoCapacity) shouldHavePoints(t, buf, points...) - buf.DiscardPointsBefore(5) + buf.DiscardPointsAtOrBefore(4) shouldHavePoints(t, buf, points[4:]...) buf.Release() @@ -176,7 +176,7 @@ func testDiscardPointsBeforeThroughWrapAround[T any](t *testing.T, buf ringBuffe // Ideally we wouldn't reach into the internals here, but this helps ensure the test is testing the correct scenario. 
require.Len(t, buf.GetPoints(), 4, "expected underlying slice to have length 4, if this assertion fails, the test setup is not as expected") require.Equal(t, 4, cap(buf.GetPoints()), "expected underlying slice to have capacity 4, if this assertion fails, the test setup is not as expected") - buf.DiscardPointsBefore(3) + buf.DiscardPointsAtOrBefore(2) require.NoError(t, buf.Append(points[4])) require.NoError(t, buf.Append(points[5])) @@ -185,13 +185,13 @@ func testDiscardPointsBeforeThroughWrapAround[T any](t *testing.T, buf ringBuffe require.Equal(t, 4, cap(buf.GetPoints()), "expected underlying slice to have capacity 4") // Discard before end of underlying slice. - buf.DiscardPointsBefore(4) + buf.DiscardPointsAtOrBefore(3) shouldHavePoints(t, buf, points[3:6]...) require.Equal(t, 3, buf.GetFirstIndex(), "expected first point to be in middle of underlying slice, if this assertion fails, the test setup is not as expected") // Discard after wraparound. - buf.DiscardPointsBefore(6) + buf.DiscardPointsAtOrBefore(5) shouldHavePoints(t, buf, points[5]) } @@ -261,7 +261,7 @@ func TestRingBuffer_RemoveLastPoint(t *testing.T) { require.Len(t, buf.GetPoints(), 4, "expected underlying slice to have length 4, if this assertion fails, the test setup is not as expected") require.Equal(t, 4, cap(buf.GetPoints()), "expected underlying slice to have capacity 4, if this assertion fails, the test setup is not as expected") require.Equal(t, 4, buf.size, "The size includes all points") - buf.DiscardPointsBefore(3) + buf.DiscardPointsAtOrBefore(2) require.Equal(t, 2, buf.size, "The size is reduced by the removed points") require.Equal(t, 2, buf.GetFirstIndex(), "the firstIndex is half way through the ring") diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index caa398f7be9..201a29a7cf1 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -54,21 +54,51 @@ func InitLogger(logFormat string, logLevel dslog.Level, buffered bool, rateLimit return logger } +type logLevel int + +const ( + debugLevel logLevel = iota + infoLevel + warnLevel + errorLevel +) + +type leveledLogger interface { + level() logLevel +} + +var _ leveledLogger = levelFilter{} + // Pass through Logger and implement the DebugEnabled interface that spanlogger looks for. type levelFilter struct { log.Logger - debugEnabled bool + lvl logLevel } func newFilter(logger log.Logger, lvl dslog.Level) log.Logger { + var l logLevel + switch lvl.String() { + case "info": + l = infoLevel + case "warn": + l = warnLevel + case "error": + l = errorLevel + default: + l = debugLevel + } return &levelFilter{ - Logger: level.NewFilter(logger, lvl.Option), - debugEnabled: lvl.String() == "debug", // Using inside knowledge about the hierarchy of possible options. + Logger: level.NewFilter(logger, lvl.Option), + lvl: l, } } +func (f levelFilter) level() logLevel { + return f.lvl +} + func (f *levelFilter) DebugEnabled() bool { - return f.debugEnabled + return f.lvl <= debugLevel } func getWriter(buffered bool) io.Writer { diff --git a/pkg/util/log/slogadapter.go b/pkg/util/log/slogadapter.go new file mode 100644 index 00000000000..318b742722f --- /dev/null +++ b/pkg/util/log/slogadapter.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package log + +import ( + "log/slog" + + "github.com/go-kit/log" + slgk "github.com/tjhop/slog-gokit" +) + +// SlogFromGoKit returns slog adapter for logger. 
+func SlogFromGoKit(logger log.Logger) *slog.Logger { + var sl slog.Level + x, ok := logger.(leveledLogger) + if !ok { + sl = slog.LevelDebug + } else { + switch x.level() { + case infoLevel: + sl = slog.LevelInfo + case warnLevel: + sl = slog.LevelWarn + case errorLevel: + sl = slog.LevelError + default: + sl = slog.LevelDebug + } + } + + lvl := slog.LevelVar{} + lvl.Set(sl) + return slog.New(slgk.NewGoKitHandler(logger, &lvl)) +} diff --git a/pkg/util/log/slogadapter_test.go b/pkg/util/log/slogadapter_test.go new file mode 100644 index 00000000000..192f761239c --- /dev/null +++ b/pkg/util/log/slogadapter_test.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package log + +import ( + "context" + "fmt" + "log/slog" + "testing" + + "github.com/go-kit/log/level" + dslog "github.com/grafana/dskit/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestSlogFromGoKit(t *testing.T) { + levels := []level.Value{ + level.DebugValue(), + level.InfoValue(), + level.WarnValue(), + level.ErrorValue(), + } + slogLevels := []slog.Level{ + slog.LevelDebug, + slog.LevelInfo, + slog.LevelWarn, + slog.LevelError, + } + + t.Run("enabled for the right slog levels when go-kit level configured", func(t *testing.T) { + for i, l := range levels { + var lvl dslog.Level + switch i { + case 0: + require.NoError(t, lvl.Set("debug")) + case 1: + require.NoError(t, lvl.Set("info")) + case 2: + require.NoError(t, lvl.Set("warn")) + case 3: + require.NoError(t, lvl.Set("error")) + default: + panic(fmt.Errorf("unhandled level %d", i)) + } + + mLogger := &mockLogger{} + logger := newFilter(mLogger, lvl) + slogger := SlogFromGoKit(logger) + + for j, sl := range slogLevels { + if j >= i { + assert.Truef(t, slogger.Enabled(context.Background(), sl), "slog logger should be enabled for go-kit level %v / slog level %v", l, sl) + } else { + assert.Falsef(t, slogger.Enabled(context.Background(), sl), "slog logger should not be enabled for go-kit level %v / slog level %v", l, sl) + } + } + } + }) + + t.Run("enabled for the right slog levels when go-kit level not configured", func(t *testing.T) { + mLogger := &mockLogger{} + slogger := SlogFromGoKit(mLogger) + + for _, sl := range slogLevels { + assert.Truef(t, slogger.Enabled(context.Background(), sl), "slog logger should be enabled for level %v", sl) + } + }) + + t.Run("wraps go-kit logger", func(*testing.T) { + mLogger := &mockLogger{} + slogger := SlogFromGoKit(mLogger) + + for _, l := range levels { + mLogger.On("Log", level.Key(), l, "caller", mock.AnythingOfType("string"), "time", mock.AnythingOfType("time.Time"), "msg", "test", "attr", slog.StringValue("value")).Times(1).Return(nil) + attrs := []any{"attr", "value"} + switch l { + case level.DebugValue(): + slogger.Debug("test", attrs...) + case level.InfoValue(): + slogger.Info("test", attrs...) + case level.WarnValue(): + slogger.Warn("test", attrs...) + case level.ErrorValue(): + slogger.Error("test", attrs...) + default: + panic(fmt.Errorf("unrecognized level %v", l)) + } + } + }) +} diff --git a/pkg/util/promqlext/promqlext.go b/pkg/util/promqlext/promqlext.go new file mode 100644 index 00000000000..6f32241d06f --- /dev/null +++ b/pkg/util/promqlext/promqlext.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package promqlext + +import ( + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" +) + +// ExtendPromQL enriches PromQL with Mimir extensions. 
+func ExtendPromQL() { + // Keep an alias for users using holt_winters, even though dropped in Prometheus v3. + promql.FunctionCalls["holt_winters"] = promql.FunctionCalls["double_exponential_smoothing"] + parser.Functions["holt_winters"] = parser.Functions["double_exponential_smoothing"] + parser.Functions["holt_winters"].Experimental = false +} diff --git a/tools/splitblocks/main_test.go b/tools/splitblocks/main_test.go index 4b60dc3192b..e3668fd68fb 100644 --- a/tools/splitblocks/main_test.go +++ b/tools/splitblocks/main_test.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/dskit/runutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" @@ -200,7 +201,7 @@ func buildSeriesSpec(startOfDay time.Time) []*block.SeriesSpec { } func listSeriesAndChunksFromBlock(t *testing.T, blockDir string) []*block.SeriesSpec { - blk, err := tsdb.OpenBlock(log.NewNopLogger(), blockDir, nil) + blk, err := tsdb.OpenBlock(promslog.NewNopLogger(), blockDir, nil) require.NoError(t, err) chunkReader, err := blk.Chunks() require.NoError(t, err) diff --git a/tools/tsdb-compact/main.go b/tools/tsdb-compact/main.go index e04bd6d45cb..efcad971f9c 100644 --- a/tools/tsdb-compact/main.go +++ b/tools/tsdb-compact/main.go @@ -15,6 +15,8 @@ import ( golog "github.com/go-kit/log" "github.com/grafana/dskit/flagext" "github.com/prometheus/prometheus/tsdb" + + util_log "github.com/grafana/mimir/pkg/util/log" ) func main() { @@ -55,7 +57,7 @@ func main() { blockDirs = append(blockDirs, d) - b, err := tsdb.OpenBlock(logger, d, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), d, nil) if err != nil { log.Fatalln("failed to open block:", d, err) } @@ -86,7 +88,7 @@ func main() { ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) defer cancel() - c, err := tsdb.NewLeveledCompactorWithChunkSize(ctx, nil, logger, []int64{0}, nil, segmentSizeMB*1024*1024, nil) + c, err := tsdb.NewLeveledCompactorWithChunkSize(ctx, nil, util_log.SlogFromGoKit(logger), []int64{0}, nil, segmentSizeMB*1024*1024, nil) if err != nil { log.Fatalln("creating compator", err) } diff --git a/tools/tsdb-gaps/main.go b/tools/tsdb-gaps/main.go index d91865b4380..eba3f51b31a 100644 --- a/tools/tsdb-gaps/main.go +++ b/tools/tsdb-gaps/main.go @@ -18,6 +18,8 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + + util_log "github.com/grafana/mimir/pkg/util/log" ) const ( @@ -213,7 +215,7 @@ func main() { func analyzeBlockForGaps(ctx context.Context, cfg config, blockDir string, matchers []*labels.Matcher) (blockGapStats, error) { var blockStats blockGapStats blockStats.BlockID = blockDir - b, err := tsdb.OpenBlock(logger, blockDir, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), blockDir, nil) if err != nil { return blockStats, fmt.Errorf("failed to open block: %w", err) } diff --git a/tools/tsdb-print-chunk/main.go b/tools/tsdb-print-chunk/main.go index 7e860a0df2b..2db173031f5 100644 --- a/tools/tsdb-print-chunk/main.go +++ b/tools/tsdb-print-chunk/main.go @@ -14,6 +14,8 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + + util_log "github.com/grafana/mimir/pkg/util/log" ) var logger = log.NewLogfmtLogger(os.Stderr) @@ -30,7 +32,7 @@ func main() { } 
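For context on the holt_winters alias registered in ExtendPromQL above: once the alias is installed, a query written against the Prometheus 2.x function name still parses and evaluates through double_exponential_smoothing, even though Prometheus 3.0 dropped holt_winters. The following is a minimal illustrative sketch, not part of the patch; the metric name `some_metric` and the smoothing parameters are made up, while ExtendPromQL and parser.ParseExpr come from the code above and from the Prometheus parser package.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"

        "github.com/grafana/mimir/pkg/util/promqlext"
    )

    func main() {
        // Register Mimir's PromQL extensions, including the holt_winters alias.
        promqlext.ExtendPromQL()

        // holt_winters now resolves to double_exponential_smoothing, so this parses
        // even though Prometheus 3.0 removed the old function name.
        expr, err := parser.ParseExpr(`holt_winters(some_metric[5m], 0.3, 0.3)`)
        if err != nil {
            panic(err)
        }
        fmt.Println(expr)
    }
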
func printChunks(blockDir string, chunkRefs []string) { - b, err := tsdb.OpenBlock(logger, blockDir, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), blockDir, nil) if err != nil { fmt.Fprintln(os.Stderr, "Failed to open TSDB block", blockDir, "due to error:", err) os.Exit(1) diff --git a/tools/tsdb-series/main.go b/tools/tsdb-series/main.go index 3bfc0469036..2650042788f 100644 --- a/tools/tsdb-series/main.go +++ b/tools/tsdb-series/main.go @@ -21,6 +21,8 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" + + util_log "github.com/grafana/mimir/pkg/util/log" ) var logger = log.NewLogfmtLogger(os.Stderr) @@ -104,7 +106,7 @@ type seriesWithStats struct { } func printBlockIndex(ctx context.Context, blockDir string, printChunks bool, seriesStats bool, matchers []*labels.Matcher, minTime time.Time, maxTime time.Time, jsonOutput bool) { - block, err := tsdb.OpenBlock(logger, blockDir, nil) + block, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), blockDir, nil) if err != nil { level.Error(logger).Log("msg", "failed to open block", "dir", blockDir, "err", err) return diff --git a/tools/tsdb-symbols/main.go b/tools/tsdb-symbols/main.go index a4d9979f1e1..fabdbc057d1 100644 --- a/tools/tsdb-symbols/main.go +++ b/tools/tsdb-symbols/main.go @@ -9,11 +9,11 @@ import ( "fmt" "io" "log" + "log/slog" "os" "path/filepath" "time" - gokitlog "github.com/go-kit/log" "github.com/grafana/dskit/flagext" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" @@ -90,7 +90,7 @@ func main() { } func analyseSymbols(ctx context.Context, blockDir string, uniqueSymbols map[string]struct{}, uniqueSymbolsPerShard []map[string]struct{}) error { - block, err := tsdb.OpenBlock(gokitlog.NewLogfmtLogger(os.Stderr), blockDir, nil) + block, err := tsdb.OpenBlock(slog.New(slog.NewTextHandler(os.Stderr, nil)), blockDir, nil) if err != nil { return fmt.Errorf("failed to open block: %v", err) } diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go index a1551dbc877..8eb8b08c600 100644 --- a/vendor/github.com/golang/glog/glog_file.go +++ b/vendor/github.com/golang/glog/glog_file.go @@ -26,7 +26,6 @@ import ( "fmt" "io" "os" - "os/user" "path/filepath" "runtime" "strings" @@ -68,9 +67,8 @@ func init() { host = shortHostname(h) } - current, err := user.Current() - if err == nil { - userName = current.Username + if u := lookupUser(); u != "" { + userName = u } // Sanitize userName since it is used to construct file paths. 
userName = strings.Map(func(r rune) rune { diff --git a/vendor/github.com/golang/glog/glog_file_nonwindows.go b/vendor/github.com/golang/glog/glog_file_nonwindows.go new file mode 100644 index 00000000000..d5cdb793c54 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_nonwindows.go @@ -0,0 +1,12 @@ +//go:build !windows + +package glog + +import "os/user" + +func lookupUser() string { + if current, err := user.Current(); err == nil { + return current.Username + } + return "" +} diff --git a/vendor/github.com/golang/glog/glog_file_windows.go b/vendor/github.com/golang/glog/glog_file_windows.go new file mode 100644 index 00000000000..a9e4f609dfb --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_windows.go @@ -0,0 +1,30 @@ +//go:build windows + +package glog + +import ( + "syscall" +) + +// This follows the logic in the standard library's user.Current() function, except +// that it leaves out the potentially expensive calls required to look up the user's +// display name in Active Directory. +func lookupUser() string { + token, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "" + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "" + } + username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") + if err != nil { + return "" + } + if accountType != syscall.SidTypeUser { + return "" + } + return username +} diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go new file mode 100644 index 00000000000..1677605af1e --- /dev/null +++ b/vendor/github.com/prometheus/common/promslog/slog.go @@ -0,0 +1,198 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promslog defines standardised ways to initialize the Go standard +// library's log/slog logger. +// It should typically only ever be imported by main packages. + +package promslog + +import ( + "fmt" + "io" + "log/slog" + "os" + "path/filepath" + "strconv" + "strings" +) + +type LogStyle string + +const ( + SlogStyle LogStyle = "slog" + GoKitStyle LogStyle = "go-kit" +) + +var ( + LevelFlagOptions = []string{"debug", "info", "warn", "error"} + FormatFlagOptions = []string{"logfmt", "json"} + + callerAddFunc = false + defaultWriter = os.Stderr + goKitStyleReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr { + key := a.Key + switch key { + case slog.TimeKey: + a.Key = "ts" + + // This timestamp format differs from RFC3339Nano by using .000 instead + // of .999999999 which changes the timestamp from 9 variable to 3 fixed + // decimals (.130 instead of .130987456). 
+ t := a.Value.Time() + a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00")) + case slog.SourceKey: + a.Key = "caller" + src, _ := a.Value.Any().(*slog.Source) + + switch callerAddFunc { + case true: + a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line)) + default: + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } + case slog.LevelKey: + a.Value = slog.StringValue(strings.ToLower(a.Value.String())) + default: + } + + return a + } + truncateSourceAttrFunc = func(groups []string, a slog.Attr) slog.Attr { + if a.Key != slog.SourceKey { + return a + } + + if src, ok := a.Value.Any().(*slog.Source); ok { + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } + + return a + } +) + +// AllowedLevel is a settable identifier for the minimum level a log entry +// must be have. +type AllowedLevel struct { + s string + lvl *slog.LevelVar +} + +func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + type plain string + if err := unmarshal((*plain)(&s)); err != nil { + return err + } + if s == "" { + return nil + } + lo := &AllowedLevel{} + if err := lo.Set(s); err != nil { + return err + } + *l = *lo + return nil +} + +func (l *AllowedLevel) String() string { + return l.s +} + +// Set updates the value of the allowed level. +func (l *AllowedLevel) Set(s string) error { + if l.lvl == nil { + l.lvl = &slog.LevelVar{} + } + + switch s { + case "debug": + l.lvl.Set(slog.LevelDebug) + callerAddFunc = true + case "info": + l.lvl.Set(slog.LevelInfo) + callerAddFunc = false + case "warn": + l.lvl.Set(slog.LevelWarn) + callerAddFunc = false + case "error": + l.lvl.Set(slog.LevelError) + callerAddFunc = false + default: + return fmt.Errorf("unrecognized log level %s", s) + } + l.s = s + return nil +} + +// AllowedFormat is a settable identifier for the output format that the logger can have. +type AllowedFormat struct { + s string +} + +func (f *AllowedFormat) String() string { + return f.s +} + +// Set updates the value of the allowed format. +func (f *AllowedFormat) Set(s string) error { + switch s { + case "logfmt", "json": + f.s = s + default: + return fmt.Errorf("unrecognized log format %s", s) + } + return nil +} + +// Config is a struct containing configurable settings for the logger +type Config struct { + Level *AllowedLevel + Format *AllowedFormat + Style LogStyle + Writer io.Writer +} + +// New returns a new slog.Logger. Each logged line will be annotated +// with a timestamp. The output always goes to stderr. +func New(config *Config) *slog.Logger { + if config.Level == nil { + config.Level = &AllowedLevel{} + _ = config.Level.Set("info") + } + + if config.Writer == nil { + config.Writer = defaultWriter + } + + logHandlerOpts := &slog.HandlerOptions{ + Level: config.Level.lvl, + AddSource: true, + ReplaceAttr: truncateSourceAttrFunc, + } + + if config.Style == GoKitStyle { + logHandlerOpts.ReplaceAttr = goKitStyleReplaceAttrFunc + } + + if config.Format != nil && config.Format.s == "json" { + return slog.New(slog.NewJSONHandler(config.Writer, logHandlerOpts)) + } + return slog.New(slog.NewTextHandler(config.Writer, logHandlerOpts)) +} + +// NewNopLogger is a convenience function to return an slog.Logger that writes +// to io.Discard. 
+func NewNopLogger() *slog.Logger { + return slog.New(slog.NewTextHandler(io.Discard, nil)) +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go index c607a163a32..51da762c957 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go @@ -18,11 +18,11 @@ package web import ( "encoding/hex" "fmt" + "log/slog" "net/http" "strings" "sync" - "github.com/go-kit/log" "golang.org/x/crypto/bcrypt" ) @@ -78,7 +78,7 @@ HeadersLoop: type webHandler struct { tlsConfigPath string handler http.Handler - logger log.Logger + logger *slog.Logger cache *cache // bcryptMtx is there to ensure that bcrypt.CompareHashAndPassword is run // only once in parallel as this is CPU intensive. @@ -88,7 +88,7 @@ type webHandler struct { func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { c, err := getConfig(u.tlsConfigPath) if err != nil { - u.logger.Log("msg", "Unable to parse configuration", "err", err) + u.logger.Error("Unable to parse configuration", "err", err.Error()) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go index b16ded1d657..0730a938fde 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go @@ -18,6 +18,7 @@ import ( "crypto/x509" "errors" "fmt" + "log/slog" "net" "net/http" "net/url" @@ -27,8 +28,6 @@ import ( "strings" "github.com/coreos/go-systemd/v22/activation" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/mdlayher/vsock" config_util "github.com/prometheus/common/config" "golang.org/x/sync/errgroup" @@ -267,7 +266,7 @@ func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { // ServeMultiple starts the server on the given listeners. The FlagConfig is // also passed on to Serve. -func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { +func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagConfig, logger *slog.Logger) error { errs := new(errgroup.Group) for _, l := range listeners { l := l @@ -284,13 +283,13 @@ func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagCon // Or instead uses systemd socket activated listeners if WebSystemdSocket in the // FlagConfig is true. // The FlagConfig is also passed on to ServeMultiple. -func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) error { +func ListenAndServe(server *http.Server, flags *FlagConfig, logger *slog.Logger) error { if flags.WebSystemdSocket == nil && (flags.WebListenAddresses == nil || len(*flags.WebListenAddresses) == 0) { return ErrNoListeners } if flags.WebSystemdSocket != nil && *flags.WebSystemdSocket { - level.Info(logger).Log("msg", "Listening on systemd activated listeners instead of port listeners.") + logger.Info("Listening on systemd activated listeners instead of port listeners.") listeners, err := activation.Listeners() if err != nil { return err @@ -344,11 +343,11 @@ func parseVsockPort(address string) (uint32, error) { // Server starts the server on the given listener. Based on the file path // WebConfigFile in the FlagConfig, TLS or basic auth could be enabled. 
-func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { - level.Info(logger).Log("msg", "Listening on", "address", l.Addr().String()) +func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger *slog.Logger) error { + logger.Info("Listening on", "address", l.Addr().String()) tlsConfigPath := *flags.WebConfigFile if tlsConfigPath == "" { - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) + logger.Info("TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) } @@ -381,10 +380,10 @@ func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger log.Lo server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) } // Valid TLS config. - level.Info(logger).Log("msg", "TLS is enabled.", "http2", c.HTTPConfig.HTTP2, "address", l.Addr().String()) + logger.Info("TLS is enabled.", "http2", c.HTTPConfig.HTTP2, "address", l.Addr().String()) case errNoTLSConfig: // No TLS config, back to plain HTTP. - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) + logger.Info("TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) default: // Invalid TLS config. @@ -512,6 +511,6 @@ func (tv *TLSVersion) MarshalYAML() (interface{}, error) { // tlsConfigPath, TLS or basic auth could be enabled. // // Deprecated: Use ListenAndServe instead. -func Listen(server *http.Server, flags *FlagConfig, logger log.Logger) error { +func Listen(server *http.Server, flags *FlagConfig, logger *slog.Logger) error { return ListenAndServe(server, flags, logger) } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 4f80b551bc6..30a74e04025 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -16,6 +16,8 @@ package config import ( "errors" "fmt" + "log/slog" + "mime" "net/url" "os" "path/filepath" @@ -25,8 +27,6 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -73,7 +73,7 @@ const ( ) // Load parses the YAML input s into a Config. -func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func Load(s string, logger *slog.Logger) (*Config, error) { cfg := &Config{} // If the entire config body is empty the UnmarshalYAML method is // never called. We thus have to set the DefaultConfig at the entry @@ -85,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return nil, err } - if !expandExternalLabels { - return cfg, nil - } - b := labels.NewScratchBuilder(0) cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { @@ -98,26 +94,28 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro if v := os.Getenv(s); v != "" { return v } - level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + logger.Warn("Empty environment variable", "name", s) return "" }) if newV != v.Value { - level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV) } // Note newV can be blank. 
https://github.com/prometheus/prometheus/issues/11024 b.Add(v.Name, newV) }) - cfg.GlobalConfig.ExternalLabels = b.Labels() + if !b.Labels().IsEmpty() { + cfg.GlobalConfig.ExternalLabels = b.Labels() + } return cfg, nil } // LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err } - cfg, err := Load(string(content), expandExternalLabels, logger) + cfg, err := Load(string(content), logger) if err != nil { return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err) } @@ -166,13 +164,13 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. - ScrapeClassicHistograms: false, - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - EnableCompression: true, + AlwaysScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -183,13 +181,18 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } + DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: false, + } + // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig, } // DefaultQueueConfig is the default remote queue configuration. @@ -476,9 +479,22 @@ func (s ScrapeProtocol) Validate() error { return nil } +// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol. 
+func (s ScrapeProtocol) HeaderMediaType() string { + if _, ok := ScrapeProtocolsHeaders[s]; !ok { + return "" + } + mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s]) + if err != nil { + return "" + } + return mediaType +} + var ( PrometheusProto ScrapeProtocol = "PrometheusProto" PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 @@ -486,6 +502,7 @@ var ( ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusText0_0_4: "text/plain;version=0.0.4", + PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } @@ -495,6 +512,7 @@ var ( DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } @@ -506,6 +524,7 @@ var ( PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } ) @@ -632,10 +651,17 @@ type ScrapeConfig struct { // The protocols to negotiate during a scrape. It tells clients what // protocol are accepted by Prometheus and with what preference (most wanted is first). // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, - // OpenMetricsText1.0.0, PrometheusText0.0.4. + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` - // Whether to scrape a classic histogram that is also exposed as a native histogram. - ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` + // The fallback protocol to use if the Content-Type provided by the target + // is not provided, blank, or not one of the expected values. + // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. + ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"` + // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. + AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` + // Whether to convert all scraped classic histograms into a native histogram with custom buckets. + ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` // File to which scrape failures are logged. ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. 
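As a brief illustration of the HeaderMediaType helper introduced in this hunk (a sketch, not part of the patch): it looks up the registered Content-Type header for a scrape protocol and returns only the MIME media type, dropping the version and escaping parameters, so the new PrometheusText1.0.0 entry reduces to plain "text/plain". The protocol name "SomethingElse" below is an invented example of an unregistered protocol.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // The registered header is "text/plain;version=1.0.0;escaping=allow-utf-8";
        // mime.ParseMediaType strips the parameters, leaving "text/plain".
        fmt.Println(config.PrometheusText1_0_0.HeaderMediaType())

        // A protocol with no registered header returns an empty string.
        fmt.Println(config.ScrapeProtocol("SomethingElse").HeaderMediaType())
    }
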
@@ -783,11 +809,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + if c.ScrapeFallbackProtocol != "" { + if err := c.ScrapeFallbackProtocol.Validate(); err != nil { + return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err) + } + } + switch globalConfig.MetricNameValidationScheme { - case "", LegacyValidationConfig: - case UTF8ValidationConfig: + case LegacyValidationConfig: + case "", UTF8ValidationConfig: if model.NameValidationScheme != model.UTF8Validation { - return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names") + panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8") } default: return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) @@ -958,6 +990,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig { // AlertmanagerAPIVersion represents a version of the // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'. +// 'v1' is no longer supported. type AlertmanagerAPIVersion string // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -987,7 +1020,7 @@ const ( ) var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{ - AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2, + AlertmanagerAPIVersionV2, } // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. diff --git a/vendor/github.com/prometheus/prometheus/config/reload.go b/vendor/github.com/prometheus/prometheus/config/reload.go new file mode 100644 index 00000000000..8be1b28d8ab --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/config/reload.go @@ -0,0 +1,92 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v2" +) + +type ExternalFilesConfig struct { + RuleFiles []string `yaml:"rule_files"` + ScrapeConfigFiles []string `yaml:"scrape_config_files"` +} + +// GenerateChecksum generates a checksum of the YAML file and the files it references. 
+func GenerateChecksum(yamlFilePath string) (string, error) { + hash := sha256.New() + + yamlContent, err := os.ReadFile(yamlFilePath) + if err != nil { + return "", fmt.Errorf("error reading YAML file: %w", err) + } + _, err = hash.Write(yamlContent) + if err != nil { + return "", fmt.Errorf("error writing YAML file to hash: %w", err) + } + + var config ExternalFilesConfig + if err := yaml.Unmarshal(yamlContent, &config); err != nil { + return "", fmt.Errorf("error unmarshalling YAML: %w", err) + } + + dir := filepath.Dir(yamlFilePath) + + for i, file := range config.RuleFiles { + config.RuleFiles[i] = filepath.Join(dir, file) + } + for i, file := range config.ScrapeConfigFiles { + config.ScrapeConfigFiles[i] = filepath.Join(dir, file) + } + + files := map[string][]string{ + "r": config.RuleFiles, // "r" for rule files + "s": config.ScrapeConfigFiles, // "s" for scrape config files + } + + for _, prefix := range []string{"r", "s"} { + for _, pattern := range files[prefix] { + matchingFiles, err := filepath.Glob(pattern) + if err != nil { + return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err) + } + + for _, file := range matchingFiles { + // Write prefix to the hash ("r" or "s") followed by \0, then + // the file path. + _, err = hash.Write([]byte(prefix + "\x00" + file + "\x00")) + if err != nil { + return "", fmt.Errorf("error writing %q path to hash: %w", file, err) + } + + // Read and hash the content of the file. + content, err := os.ReadFile(file) + if err != nil { + return "", fmt.Errorf("error reading file %s: %w", file, err) + } + _, err = hash.Write(append(content, []byte("\x00")...)) + if err != nil { + return "", fmt.Errorf("error writing %q content to hash: %w", file, err) + } + } + } + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md index 4c066086256..d5418e7fb11 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/README.md +++ b/vendor/github.com/prometheus/prometheus/discovery/README.md @@ -233,7 +233,7 @@ type Config interface { } type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger // A registerer for the Discoverer's metrics. Registerer prometheus.Registerer diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index a91faf6c864..c400de3632f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -15,9 +15,9 @@ package discovery import ( "context" + "log/slog" "reflect" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -47,7 +47,7 @@ type DiscovererMetrics interface { // DiscovererOptions provides options for a Discoverer. type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger Metrics DiscovererMetrics @@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. 
func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index cefa90a8669..87e0ecc44b5 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -16,14 +16,14 @@ package discovery import ( "context" "fmt" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere } // NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } mgr := &Manager{ logger: logger, @@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { mgr.metrics = metrics } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } @@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. type Manager struct { - logger log.Logger + logger *slog.Logger name string httpOpts []config.HTTPClientOption mtx sync.RWMutex @@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D } func (m *Manager) startProvider(ctx context.Context, p *Provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) @@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case tgs, ok := <-updates: m.metrics.ReceivedUpdates.Inc() if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + m.logger.Debug("Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. 
<-ctx.Done() return @@ -364,7 +364,7 @@ func (m *Manager) sender() { case m.syncCh <- m.allGroups(): default: m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: default: @@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), + Logger: m.logger.With("discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, Metrics: m.sdMetrics[typ], }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) + m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go index f037a90cff0..31646c0e4c1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go +++ b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go @@ -16,17 +16,17 @@ package refresh import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) type Options struct { - Logger log.Logger + Logger *slog.Logger Mech string Interval time.Duration RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) @@ -35,7 +35,7 @@ type Options struct { // Discovery implements the Discoverer interface. 
type Discovery struct { - logger log.Logger + logger *slog.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) metrics *discovery.RefreshMetrics @@ -45,9 +45,9 @@ type Discovery struct { func NewDiscovery(opts Options) *Discovery { m := opts.MetricsInstantiator.Instantiate(opts.Mech) - var logger log.Logger + var logger *slog.Logger if opts.Logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } else { logger = opts.Logger } @@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } } else { select { @@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } continue } diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 300f3176e42..a6ad47acd3b 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -304,6 +304,14 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar h.Count /= scalar h.Sum /= scalar + // Division by zero removes all buckets. + if scalar == 0 { + h.PositiveBuckets = nil + h.NegativeBuckets = nil + h.PositiveSpans = nil + h.NegativeSpans = nil + return h + } for i := range h.PositiveBuckets { h.PositiveBuckets[i] /= scalar } @@ -353,7 +361,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } @@ -669,7 +677,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. for { @@ -902,7 +910,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. 
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go index d7bdc1e0768..99529a38367 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -230,5 +230,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index c8bce51234a..c64bb990e02 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -16,7 +16,6 @@ package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -299,10 +298,8 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. @@ -338,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index 3238190e989..0847c819c59 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -100,13 +100,13 @@ func newFastRegexMatcherWithoutCache(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index a880465969a..eb79f7be21c 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -171,7 +171,7 @@ type Regexp struct { // NewRegexp creates a new anchored Regexp and returns an error if the // passed-in regular expression does not compile. 
func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") + regex, err := regexp.Compile("^(?s:" + s + ")$") return Regexp{Regexp: regex}, err } @@ -218,8 +218,8 @@ func (re Regexp) String() string { } str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. + return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations @@ -277,6 +277,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + lb.Set(cfg.TargetLabel, cfg.Replacement) + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. if indexes == nil { @@ -326,3 +333,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index ec00b935401..4f6ae43f736 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -111,6 +111,20 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { ) } + for k, v := range g.Labels { + if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { + errs = append( + errs, fmt.Errorf("invalid label name: %s", k), + ) + } + + if !model.LabelValue(v).IsValid() { + errs = append( + errs, fmt.Errorf("invalid label value: %s", v), + ) + } + } + set[g.Name] = struct{}{} for i, r := range g.Rules { @@ -143,10 +157,11 @@ type RuleGroup struct { EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty"` QueryOffset *model.Duration `yaml:"query_offset,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` - SourceTenants []string `yaml:"source_tenants,omitempty"` - AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` + SourceTenants []string `yaml:"source_tenants,omitempty"` + AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` } // Rule describes an alerting or recording rule. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 0b5d9281e4d..26828552819 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -14,6 +14,8 @@ package textparse import ( + "errors" + "fmt" "mime" "github.com/prometheus/common/model" @@ -23,8 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -// Parser parses samples from a byte slice of samples in the official -// Prometheus and OpenMetrics text exposition formats. +// Parser parses samples from a byte slice of samples in different exposition formats. 
type Parser interface { // Series returns the bytes of a series with a simple float64 as a // value, the timestamp if set, and the value of the current sample. @@ -58,6 +59,8 @@ type Parser interface { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. + // The values of the "le" labels of classic histograms and "quantile" labels + // of summaries should follow the OpenMetrics formatting rules. Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed @@ -69,6 +72,8 @@ type Parser interface { // CreatedTimestamp returns the created timestamp (in milliseconds) for the // current sample. It returns nil if it is unknown e.g. if it wasn't set, // if the scrape protocol or metric type does not support created timestamps. + // Assume the CreatedTimestamp returned pointer is only valid until + // the Next iteration. CreatedTimestamp() *int64 // Next advances the parser to the next sample. @@ -76,26 +81,65 @@ type Parser interface { Next() (Entry, error) } -// New returns a new parser of the byte slice. -// -// This function always returns a valid parser, but might additionally -// return an error if the content type cannot be parsed. -func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { +// extractMediaType returns the mediaType of a required parser. It tries first to +// extract a valid and supported mediaType from contentType. If that fails, +// the provided fallbackType (possibly an empty string) is returned, together with +// an error. fallbackType is used as-is without further validation. +func extractMediaType(contentType, fallbackType string) (string, error) { if contentType == "" { - return NewPromParser(b, st), nil + if fallbackType == "" { + return "", errors.New("non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target") + } + return fallbackType, fmt.Errorf("non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol %q", fallbackType) } + // We have a contentType, parse it. mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { - return NewPromParser(b, st), err + if fallbackType == "" { + retErr := fmt.Errorf("cannot parse Content-Type %q and no fallback_scrape_protocol for target", contentType) + return "", errors.Join(retErr, err) + } + retErr := fmt.Errorf("could not parse received Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) + return fallbackType, errors.Join(retErr, err) + } + + // We have a valid media type, either we recognise it and can use it + // or we have to error. + switch mediaType { + case "application/openmetrics-text", "application/vnd.google.protobuf", "text/plain": + return mediaType, nil + } + // We're here because we have no recognised mediaType. + if fallbackType == "" { + return "", fmt.Errorf("received unsupported Content-Type %q and no fallback_scrape_protocol specified for target", contentType) } + return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) +} + +// New returns a new parser of the byte slice. +// +// This function no longer guarantees to return a valid parser. +// +// It only returns a valid parser if the supplied contentType and fallbackType allow. 
+// An error may also be returned if fallbackType had to be used or there was some +// other error parsing the supplied Content-Type. +// If the returned parser is nil then the scrape must fail. +func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) { + mediaType, err := extractMediaType(contentType, fallbackType) + // err may be nil or something we want to warn about. + switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b, st), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.SkipCTSeries = skipOMCTSeries + }), err case "application/vnd.google.protobuf": - return NewProtobufParser(b, parseClassicHistograms, st), nil + return NewProtobufParser(b, parseClassicHistograms, st), err + case "text/plain": + return NewPromParser(b, st), err default: - return NewPromParser(b, st), nil + return nil, err } } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go new file mode 100644 index 00000000000..d019c327c37 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go @@ -0,0 +1,376 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "errors" + "io" + "math" + "strconv" + "strings" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/convertnhcb" +) + +type collectionState int + +const ( + stateStart collectionState = iota + stateCollecting + stateEmitting +) + +// The NHCBParser wraps a Parser and converts classic histograms to native +// histograms with custom buckets. +// +// Since Parser interface is line based, this parser needs to keep track +// of the last classic histogram series it saw to collate them into a +// single native histogram. +// +// Note: +// - Only series that have the histogram metadata type are considered for +// conversion. +// - The classic series are also returned if keepClassicHistograms is true. +type NHCBParser struct { + // The parser we're wrapping. + parser Parser + // Option to keep classic histograms along with converted histograms. + keepClassicHistograms bool + + // Labels builder. + builder labels.ScratchBuilder + + // State of the parser. + state collectionState + + // Caches the values from the underlying parser. + // For Series and Histogram. + bytes []byte + ts *int64 + value float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + // For Metric. + lset labels.Labels + metricString string + // For Type. + bName []byte + typ model.MetricType + + // Caches the entry itself if we are inserting a converted NHCB + // halfway through. 
+ entry Entry + err error + + // Caches the values and metric for the inserted converted NHCB. + bytesNHCB []byte + hNHCB *histogram.Histogram + fhNHCB *histogram.FloatHistogram + lsetNHCB labels.Labels + exemplars []exemplar.Exemplar + ctNHCB *int64 + metricStringNHCB string + + // Collates values from the classic histogram series to build + // the converted histogram later. + tempLsetNHCB labels.Labels + tempNHCB convertnhcb.TempHistogram + tempExemplars []exemplar.Exemplar + tempExemplarCount int + tempCT *int64 + + // Remembers the last base histogram metric name (assuming it's + // a classic histogram) so we can tell if the next float series + // is part of the same classic histogram. + lastHistogramName string + lastHistogramLabelsHash uint64 + lastHistogramExponential bool + // Reused buffer for hashing labels. + hBuffer []byte +} + +func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser { + return &NHCBParser{ + parser: p, + keepClassicHistograms: keepClassicHistograms, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + tempNHCB: convertnhcb.NewTempHistogram(), + } +} + +func (p *NHCBParser) Series() ([]byte, *int64, float64) { + return p.bytes, p.ts, p.value +} + +func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + if p.state == stateEmitting { + return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB + } + return p.bytes, p.ts, p.h, p.fh +} + +func (p *NHCBParser) Help() ([]byte, []byte) { + return p.parser.Help() +} + +func (p *NHCBParser) Type() ([]byte, model.MetricType) { + return p.bName, p.typ +} + +func (p *NHCBParser) Unit() ([]byte, []byte) { + return p.parser.Unit() +} + +func (p *NHCBParser) Comment() []byte { + return p.parser.Comment() +} + +func (p *NHCBParser) Metric(l *labels.Labels) string { + if p.state == stateEmitting { + *l = p.lsetNHCB + return p.metricStringNHCB + } + *l = p.lset + return p.metricString +} + +func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.state == stateEmitting { + if len(p.exemplars) == 0 { + return false + } + *ex = p.exemplars[0] + p.exemplars = p.exemplars[1:] + return true + } + return p.parser.Exemplar(ex) +} + +func (p *NHCBParser) CreatedTimestamp() *int64 { + switch p.state { + case stateStart: + if p.entry == EntrySeries || p.entry == EntryHistogram { + return p.parser.CreatedTimestamp() + } + case stateCollecting: + return p.tempCT + case stateEmitting: + return p.ctNHCB + } + return nil +} + +func (p *NHCBParser) Next() (Entry, error) { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + return p.Next() + } + } + return p.entry, p.err + } + + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { + return EntryHistogram, nil + } + return EntryInvalid, p.err + } + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.metricString = p.parser.Metric(&p.lset) + // Check the label set to see if we can continue or need to emit the NHCB. + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. + if p.processNHCB() { + return EntryHistogram, nil + } + isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. Check if after an exponential histogram. 
+ if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + return p.Next() + } + return p.entry, p.err + case EntryHistogram: + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() + } + if p.processNHCB() { + return EntryHistogram, nil + } + return p.entry, p.err +} + +// Return true if labels have changed and we should emit the NHCB. +func (p *NHCBParser) compareLabels() bool { + if p.state != stateCollecting { + return false + } + if p.typ != model.MetricTypeHistogram { + // Different metric type. + return true + } + if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) { + // Different metric name. + return true + } + nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + // Different label values. + return p.lastHistogramLabelsHash != nextHash +} + +// Save the label set of the classic histogram without suffix and bucket `le` label. +func (p *NHCBParser) storeClassicLabels() { + p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + p.lastHistogramExponential = false +} + +func (p *NHCBParser) storeExponentialLabels() { + p.lastHistogramName = p.lset.Get(labels.MetricName) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer) + p.lastHistogramExponential = true +} + +// handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB +// if it is actually a classic histogram series (and not a normal float series) and if there +// isn't already a native histogram with the same name (assuming it is always processed +// right before the classic histograms) and returns true if the collation was done. +func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { + if p.typ != model.MetricTypeHistogram { + return false + } + mName := lset.Get(labels.MetricName) + // Sanity check to ensure that the TYPE metadata entry name is the same as the base name. 
+ if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) { + return false + } + switch { + case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel): + le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) + if err == nil && !math.IsNaN(le) { + p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) { + hist.BucketCounts[le] = p.value + }) + return true + } + case strings.HasSuffix(mName, "_count"): + p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) { + hist.Count = p.value + }) + return true + case strings.HasSuffix(mName, "_sum"): + p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) { + hist.Sum = p.value + }) + return true + } + return false +} + +func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { + if p.state != stateCollecting { + p.storeClassicLabels() + p.tempCT = p.parser.CreatedTimestamp() + p.state = stateCollecting + } + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) + p.storeExemplars() + updateHist(&p.tempNHCB) +} + +func (p *NHCBParser) storeExemplars() { + for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() { + p.tempExemplarCount++ + } +} + +func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar { + switch { + case p.tempExemplarCount == len(p.tempExemplars)-1: + // Reuse the previously allocated exemplar, it was not filled up. + case len(p.tempExemplars) == cap(p.tempExemplars): + // Let the runtime grow the slice. + p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{}) + default: + // Take the next element into use. + p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1] + } + return &p.tempExemplars[len(p.tempExemplars)-1] +} + +func (p *NHCBParser) swapExemplars() { + p.exemplars = p.tempExemplars[:p.tempExemplarCount] + p.tempExemplars = p.tempExemplars[:0] + p.tempExemplarCount = 0 +} + +// processNHCB converts the collated classic histogram series to NHCB and caches the info +// to be returned to callers. Returns true if the conversion was successful. 
+func (p *NHCBParser) processNHCB() bool { + if p.state != stateCollecting { + return false + } + ub := make([]float64, 0, len(p.tempNHCB.BucketCounts)) + for b := range p.tempNHCB.BucketCounts { + ub = append(ub, b) + } + upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false) + fhBase := hBase.ToFloat(nil) + h, fh := convertnhcb.NewHistogram(p.tempNHCB, upperBounds, hBase, fhBase) + if h != nil { + if err := h.Validate(); err != nil { + return false + } + p.hNHCB = h + p.fhNHCB = nil + } else if fh != nil { + if err := fh.Validate(); err != nil { + return false + } + p.hNHCB = nil + p.fhNHCB = fh + } + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") + p.bytesNHCB = []byte(p.metricStringNHCB) + p.lsetNHCB = p.tempLsetNHCB + p.swapExemplars() + p.ctNHCB = p.tempCT + p.tempNHCB = convertnhcb.NewTempHistogram() + p.state = stateEmitting + p.tempCT = nil + return true +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 5f0415d3ee9..3ae9c7ddfc3 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -17,13 +17,16 @@ package textparse import ( + "bytes" "errors" "fmt" "io" "math" + "strconv" "strings" "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -72,15 +75,16 @@ func (l *openMetricsLexer) Error(es string) { // OpenMetrics text exposition format. // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { - l *openMetricsLexer - builder labels.ScratchBuilder - series []byte - text []byte - mtype model.MetricType - val float64 - ts int64 - hasTS bool - start int + l *openMetricsLexer + builder labels.ScratchBuilder + series []byte + mfNameLen int // length of metric family name to get from series. + text []byte + mtype model.MetricType + val float64 + ts int64 + hasTS bool + start int // offsets is a list of offsets into series that describe the positions // of the metric name and label names and values for this series. // p.offsets[0] is the start character of the metric name. @@ -95,7 +99,15 @@ type OpenMetricsParser struct { exemplarTs int64 hasExemplarTs bool - skipCTSeries bool + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead). + ignoreExemplar bool + // visitedMFName is the metric family name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. + visitedMFName []byte + skipCTSeries bool } type openMetricsParserOptions struct { @@ -201,7 +213,7 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) p.builder.Add(label, value) } @@ -252,87 +264,144 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { // CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. 
// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - if !TypeRequiresCT(p.mtype) { + if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. + p.ctHashSet = 0 // Use ctHashSet as a single way of telling "empty cache" return nil } var ( - currLset labels.Labels - buf []byte - peekWithoutNameLsetHash uint64 + buf []byte + currName []byte ) - p.Metric(&currLset) - currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - // Search for the _created line for the currFamilyLsetHash using ephemeral parser until - // we see EOF or new metric family. We have to do it as we don't know where (and if) - // that CT line is. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. - peek := deepCopy(p) + if len(p.series) > 1 && p.series[0] == '{' && p.series[1] == '"' { + // special case for UTF-8 encoded metric family names. + currName = p.series[p.offsets[0]-p.start : p.mfNameLen+2] + } else { + currName = p.series[p.offsets[0]-p.start : p.mfNameLen] + } + + currHash := p.seriesHash(&buf, currName) + // Check cache, perhaps we fetched something already. + if currHash == p.ctHashSet && p.ct > 0 { + return &p.ct + } + + // Create a new lexer to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + p.skipCTSeries = false + + p.ignoreExemplar = true + savedStart := p.start + defer func() { + p.ignoreExemplar = false + p.start = savedStart + p.l = resetLexer + }() + for { - eType, err := peek.Next() + eType, err := p.Next() if err != nil { - // This means peek will give error too later on, so def no CT line found. + // This means p.Next() will give error too later on, so def no CT line found. // This might result in partial scrape with wrong/missing CT, but only // spec improvement would help. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + p.resetCTParseValues() return nil } if eType != EntrySeries { // Assume we hit different family, no CT line found. + p.resetCTParseValues() return nil } - var peekedLset labels.Labels - peek.Metric(&peekedLset) - peekedName := peekedLset.Get(model.MetricNameLabel) - if !strings.HasSuffix(peekedName, "_created") { + peekedName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + if len(peekedName) < 8 || string(peekedName[len(peekedName)-8:]) != "_created" { // Not a CT line, search more. continue } - // We got a CT line here, but let's search if CT line is actually for our series, edge case. - peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - if peekWithoutNameLsetHash != currFamilyLsetHash { - // CT line for a different series, for our series no CT. + // Remove _created suffix. + peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8]) + if peekedHash != currHash { + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues() return nil } - ct := int64(peek.val) + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currHash, currName, true) return &ct } } -// TypeRequiresCT returns true if the metric type requires a _created timestamp. -func TypeRequiresCT(t model.MetricType) bool { - switch t { - case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: - return true - default: - return false +var ( + leBytes = []byte{108, 101} + quantileBytes = []byte{113, 117, 97, 110, 116, 105, 108, 101} +) + +// seriesHash generates a hash based on the metric family name and the offsets +// of label names and values from the parsed OpenMetrics data. It skips quantile +// and le labels for summaries and histograms respectively. +func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []byte) uint64 { + // Iterate through p.offsets to find the label names and values. + for i := 2; i < len(p.offsets); i += 4 { + lStart := p.offsets[i] - p.start + lEnd := p.offsets[i+1] - p.start + label := p.series[lStart:lEnd] + // Skip quantile and le labels for summaries and histograms. + if p.mtype == model.MetricTypeSummary && bytes.Equal(label, quantileBytes) { + continue + } + if p.mtype == model.MetricTypeHistogram && bytes.Equal(label, leBytes) { + continue + } + *offsetsArr = append(*offsetsArr, p.series[lStart:lEnd]...) + vStart := p.offsets[i+2] - p.start + vEnd := p.offsets[i+3] - p.start + *offsetsArr = append(*offsetsArr, p.series[vStart:vEnd]...) } + + *offsetsArr = append(*offsetsArr, metricFamilyName...) + hashedOffsets := xxhash.Sum64(*offsetsArr) + + // Reset the offsets array for later reuse. + *offsetsArr = (*offsetsArr)[:0] + return hashedOffsets } -// deepCopy creates a copy of a parser without re-using the slices' original memory addresses. -func deepCopy(p *OpenMetricsParser) OpenMetricsParser { - newB := make([]byte, len(p.l.b)) - copy(newB, p.l.b) +// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. +// This is useful to prevent re-parsing the same series again and early return the CT value. +func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) { + p.ct = ct + p.ctHashSet = ctHashSet + p.visitedMFName = mfName + p.skipCTSeries = skipCTSeries // Do we need to set it? +} - newLexer := &openMetricsLexer{ - b: newB, - i: p.l.i, - start: p.l.start, - err: p.l.err, - state: p.l.state, - } +// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +func (p *OpenMetricsParser) resetCTParseValues() { + p.ctHashSet = 0 + p.skipCTSeries = true +} - newParser := OpenMetricsParser{ - l: newLexer, - builder: p.builder, - mtype: p.mtype, - val: p.val, - skipCTSeries: false, +// typeRequiresCT returns true if the metric type requires a _created timestamp. +func typeRequiresCT(t model.MetricType) bool { + switch t { + case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: + return true + default: + return false } - return newParser } // nextToken returns the next token from the openMetricsLexer. 
@@ -356,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.start = p.l.i p.offsets = p.offsets[:0] - p.eOffsets = p.eOffsets[:0] - p.exemplar = p.exemplar[:0] - p.exemplarVal = 0 - p.hasExemplarTs = false + if !p.ignoreExemplar { + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + } switch t := p.nextToken(); t { case tEOFWord: @@ -378,6 +449,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { mStart++ mEnd-- } + p.mfNameLen = mEnd - mStart p.offsets = append(p.offsets, mStart, mEnd) default: return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) @@ -483,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) { func (p *OpenMetricsParser) parseComment() error { var err error + + if p.ignoreExemplar { + for t := p.nextToken(); t != tLinebreak; t = p.nextToken() { + if t == tEOF { + return errors.New("data does not end with # EOF") + } + } + return nil + } + // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets, true) if err != nil { @@ -591,10 +673,9 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e // isCreatedSeries returns true if the current series is a _created series. func (p *OpenMetricsParser) isCreatedSeries() bool { - var newLbs labels.Labels - p.Metric(&newLbs) - name := newLbs.Get(model.MetricNameLabel) - if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + metricName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + // check length so the metric is longer than len("_created") + if typeRequiresCT(p.mtype) && len(metricName) >= 8 && string(metricName[len(metricName)-8:]) == "_created" { return true } return false @@ -663,3 +744,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } + +// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels +// of summaries follow OpenMetrics formatting rules. 
+func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string { + if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) { + f, err := strconv.ParseFloat(v, 64) + if err == nil { + return formatOpenMetricsFloat(f) + } + } + return v +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index a611f3aea76..0ab932c665b 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -239,7 +239,8 @@ func (p *PromParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) + p.builder.Add(label, value) } @@ -502,7 +503,7 @@ func unreplace(s string) string { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } func parseFloat(s string) (float64, error) { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt deleted file mode 100644 index 235f0aa464b..00000000000 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt +++ /dev/null @@ -1,411 +0,0 @@ -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -go_goroutines 33 -go_memstats_alloc_bytes 1.7518624e+07 -go_memstats_alloc_bytes_total 8.3062296e+08 -go_memstats_buck_hash_sys_bytes 1.494637e+06 -go_memstats_frees_total 4.65658e+06 -go_memstats_gc_sys_bytes 1.107968e+06 -go_memstats_heap_alloc_bytes 1.7518624e+07 -go_memstats_heap_idle_bytes 6.668288e+06 -go_memstats_heap_inuse_bytes 1.8956288e+07 -go_memstats_heap_objects 72755 -go_memstats_heap_released_bytes_total 0 -go_memstats_heap_sys_bytes 2.5624576e+07 -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -go_memstats_lookups_total 2089 -go_memstats_mallocs_total 4.729335e+06 -go_memstats_mcache_inuse_bytes 9600 -go_memstats_mcache_sys_bytes 16384 -go_memstats_mspan_inuse_bytes 211520 -go_memstats_mspan_sys_bytes 245760 -go_memstats_next_gc_bytes 2.033527e+07 -go_memstats_other_sys_bytes 2.077323e+06 -go_memstats_stack_inuse_bytes 1.6384e+06 -go_memstats_stack_sys_bytes 1.6384e+06 -go_memstats_sys_bytes 3.2205048e+07 -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 
-http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 -http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 -http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 -http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 -http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 -http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 
-http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 -http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN -http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 -http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 -http_request_duration_microseconds_count{handler="version"} 0 -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 -http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN 
-http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 -http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 -http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 -http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 
-http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 -http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 -http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN 
-http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 -http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 -http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 -http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN -http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 
-prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -prometheus_config_last_reload_successful 1 -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -prometheus_evaluator_iterations_skipped_total 0 -prometheus_notifications_dropped_total 0 -prometheus_notifications_queue_capacity 10000 -prometheus_notifications_queue_length 0 -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -prometheus_sd_azure_refresh_failures_total 0 -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_failures_total 0 -prometheus_sd_dns_lookup_failures_total 0 -prometheus_sd_dns_lookups_total 0 -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -prometheus_sd_ec2_refresh_failures_total 0 -prometheus_sd_file_read_errors_total 0 -prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN -prometheus_sd_file_scan_duration_seconds_sum 0 -prometheus_sd_file_scan_duration_seconds_count 0 -prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN -prometheus_sd_gce_refresh_duration_sum 0 -prometheus_sd_gce_refresh_duration_count 0 -prometheus_sd_gce_refresh_failures_total 0 -prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 
-prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_marathon_refresh_duration_seconds_sum 0 -prometheus_sd_marathon_refresh_duration_seconds_count 0 -prometheus_sd_marathon_refresh_failures_total 0 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 -prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 -prometheus_target_interval_length_seconds_count{interval="50ms"} 685 -prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 -prometheus_target_skipped_scrapes_total 0 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 -prometheus_treecache_watcher_goroutines 0 -prometheus_treecache_zookeeper_failures_total 0 -# EOF diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt deleted file mode 100644 index 174f383e911..00000000000 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt +++ /dev/null @@ -1,529 +0,0 @@ -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 33 -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
[elided: remainder of the deleted text-format metrics test fixture, consisting of removed go_memstats_*, http_request_duration_microseconds, http_request_size_bytes, http_requests_total, http_response_size_bytes, prometheus_build_info, prometheus_config_*, prometheus_evaluator_*, prometheus_notifications_*, prometheus_rule_evaluation_failures_total, prometheus_sd_*, prometheus_target_* and prometheus_treecache_* sample families, ending with the "# EOF" marker on the next line]
-# TYPE prometheus_treecache_zookeeper_failures_total counter -prometheus_treecache_zookeeper_failures_total 0 -# EOF diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index e384a75fca4..a77e1d728f3 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -20,7 +20,9 @@ import ( "fmt" "io" "math" + "strconv" "strings" + "sync" "unicode/utf8" "github.com/gogo/protobuf/proto" @@ -34,6 +36,15 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) +// floatFormatBufPool is exclusively used in formatOpenMetricsFloat. +var floatFormatBufPool = sync.Pool{ + New: func() interface{} { + // To contain at most 17 digits and additional syntax for a float64. + b := make([]byte, 0, 24) + return &b + }, +} + // ProtobufParser is a very inefficient way of unmarshaling the old Prometheus // protobuf format and then present it as it if were parsed by a // Prometheus-2-style text parser. This is only done so that we can easily plug @@ -457,6 +468,12 @@ func (p *ProtobufParser) Next() (Entry, error) { p.state = EntryHelp case EntryHelp: + if p.mf.Unit != "" { + p.state = EntryUnit + } else { + p.state = EntryType + } + case EntryUnit: p.state = EntryType case EntryType: t := p.mf.GetType() @@ -604,7 +621,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) } -// formatOpenMetricsFloat works like the usual Go string formatting of a fleat +// formatOpenMetricsFloat works like the usual Go string formatting of a float // but appends ".0" if the resulting number would otherwise contain neither a // "." nor an "e". 
func formatOpenMetricsFloat(f float64) string { @@ -623,11 +640,15 @@ func formatOpenMetricsFloat(f float64) string { case math.IsInf(f, -1): return "-Inf" } - s := fmt.Sprint(f) - if strings.ContainsAny(s, "e.") { - return s + bp := floatFormatBufPool.Get().(*[]byte) + defer floatFormatBufPool.Put(bp) + + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if bytes.ContainsAny(*bp, "e.") { + return string(*bp) } - return s + ".0" + *bp = append(*bp, '.', '0') + return string(*bp) } // isNativeHistogram returns false iff the provided histograms has no spans at diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 218e4cb8c74..e970b67e6d2 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -16,25 +16,28 @@ package notifier import ( "bytes" "context" + "crypto/md5" + "encoding/hex" "encoding/json" "fmt" "io" + "log/slog" "net/http" "net/url" "path" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-openapi/strfmt" "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -117,7 +120,7 @@ type Manager struct { stopRequested chan struct{} alertmanagers map[string]*alertmanagerSet - logger log.Logger + logger *slog.Logger } // Options are the configurable parameters of a Handler. @@ -218,12 +221,12 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp } // NewManager is the manager constructor. -func NewManager(o *Options, logger log.Logger) *Manager { +func NewManager(o *Options, logger *slog.Logger) *Manager { if o.Do == nil { o.Do = do } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } n := &Manager{ @@ -257,6 +260,16 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs amSets := make(map[string]*alertmanagerSet) + // configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig, + // helping to avoid dropping known alertmanagers and re-use them without waiting for SD updates when applying the config. 
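The `formatOpenMetricsFloat` change above swaps `fmt.Sprint` for a pooled byte buffer plus `strconv.AppendFloat`, so each formatted sample reuses a small buffer instead of allocating a new string builder. A minimal standalone sketch of the same pattern follows; it mirrors the pool and function from the patch but omits the NaN/±Inf special cases that the real function keeps, and the `main` function exists only for illustration.

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"sync"
)

// Pooled buffer sized for a float64: at most 17 significant digits plus
// sign, exponent syntax and the appended ".0".
var floatFormatBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 24)
		return &b
	},
}

// formatOpenMetricsFloat renders f like strconv's 'g' format, but guarantees
// the output contains a "." or an "e" so it reads back as a float.
func formatOpenMetricsFloat(f float64) string {
	bp := floatFormatBufPool.Get().(*[]byte)
	defer floatFormatBufPool.Put(bp)

	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
	if bytes.ContainsAny(*bp, "e.") {
		return string(*bp)
	}
	*bp = append(*bp, '.', '0')
	return string(*bp)
}

func main() {
	fmt.Println(formatOpenMetricsFloat(42))   // "42.0"
	fmt.Println(formatOpenMetricsFloat(0.5))  // "0.5"
	fmt.Println(formatOpenMetricsFloat(1e21)) // "1e+21"
}
```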
+ configToAlertmanagers := make(map[string]*alertmanagerSet, len(n.alertmanagers)) + for _, oldAmSet := range n.alertmanagers { + hash, err := oldAmSet.configHash() + if err != nil { + return err + } + configToAlertmanagers[hash] = oldAmSet + } for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() { ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics) @@ -264,6 +277,16 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { return err } + hash, err := ams.configHash() + if err != nil { + return err + } + + if oldAmSet, ok := configToAlertmanagers[hash]; ok { + ams.ams = oldAmSet.ams + ams.droppedAms = oldAmSet.droppedAms + } + amSets[k] = ams } @@ -319,7 +342,7 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { }() wg.Wait() - level.Info(n.logger).Log("msg", "Notification manager stopped") + n.logger.Info("Notification manager stopped") } // sendLoop continuously consumes the notifications queue and sends alerts to @@ -376,20 +399,20 @@ func (n *Manager) sendOneBatch() { func (n *Manager) drainQueue() { if !n.opts.DrainOnShutdown { if n.queueLen() > 0 { - level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) n.metrics.dropped.Add(float64(n.queueLen())) } return } - level.Info(n.logger).Log("msg", "Draining any remaining notifications...") + n.logger.Info("Draining any remaining notifications...") for n.queueLen() > 0 { n.sendOneBatch() } - level.Info(n.logger).Log("msg", "Remaining notifications drained") + n.logger.Info("Remaining notifications drained") } func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { @@ -399,7 +422,7 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { for id, tgroup := range tgs { am, ok := n.alertmanagers[id] if !ok { - level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) + n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) continue } am.sync(tgroup) @@ -422,7 +445,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := len(alerts) - n.opts.QueueCapacity; d > 0 { alerts = alerts[d:] - level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } @@ -431,7 +454,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 { n.queue = n.queue[d:] - level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } n.queue = append(n.queue, alerts...) @@ -519,10 +542,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { begin := time.Now() - // v1Payload and v2Payload represent 'alerts' marshaled for Alertmanager API - // v1 or v2. Marshaling happens below. Reference here is for caching between + // cachedPayload represent 'alerts' marshaled for Alertmanager API v2. + // Marshaling happens below. Reference here is for caching between // for loop iterations. 
- var v1Payload, v2Payload []byte + var cachedPayload []byte n.mtx.RLock() amSets := n.alertmanagers @@ -553,42 +576,29 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { continue } // We can't use the cached values from previous iteration. - v1Payload, v2Payload = nil, nil + cachedPayload = nil } switch ams.cfg.APIVersion { - case config.AlertmanagerAPIVersionV1: - { - if v1Payload == nil { - v1Payload, err = json.Marshal(amAlerts) - if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) - ams.mtx.RUnlock() - return false - } - } - - payload = v1Payload - } case config.AlertmanagerAPIVersionV2: { - if v2Payload == nil { + if cachedPayload == nil { openAPIAlerts := alertsToOpenAPIAlerts(amAlerts) - v2Payload, err = json.Marshal(openAPIAlerts) + cachedPayload, err = json.Marshal(openAPIAlerts) if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err) + n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err) ams.mtx.RUnlock() return false } } - payload = v2Payload + payload = cachedPayload } default: { - level.Error(n.logger).Log( - "msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), + n.logger.Error( + fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), "err", err, ) ams.mtx.RUnlock() @@ -598,7 +608,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { if len(ams.cfg.AlertRelabelConfigs) > 0 { // We can't use the cached values on the next iteration. - v1Payload, v2Payload = nil, nil + cachedPayload = nil } for _, am := range ams.ams { @@ -609,7 +619,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) + n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err) n.metrics.errors.WithLabelValues(url).Inc() } else { numSuccess.Inc() @@ -689,7 +699,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b // // Stop is safe to call multiple times. func (n *Manager) Stop() { - level.Info(n.logger).Log("msg", "Stopping notification manager...") + n.logger.Info("Stopping notification manager...") n.stopOnce.Do(func() { close(n.stopRequested) @@ -724,10 +734,10 @@ type alertmanagerSet struct { mtx sync.RWMutex ams []alertmanager droppedAms []alertmanager - logger log.Logger + logger *slog.Logger } -func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { +func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") if err != nil { return nil, err @@ -761,7 +771,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { for _, tg := range tgs { ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) if err != nil { - level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) + s.logger.Error("Creating discovered Alertmanagers failed", "err", err) continue } allAms = append(allAms, ams...) 
@@ -770,6 +780,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { s.mtx.Lock() defer s.mtx.Unlock() + previousAms := s.ams // Set new Alertmanagers and deduplicate them along their unique URL. s.ams = []alertmanager{} s.droppedAms = []alertmanager{} @@ -789,6 +800,26 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { seen[us] = struct{}{} s.ams = append(s.ams, am) } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } +} + +func (s *alertmanagerSet) configHash() (string, error) { + b, err := yaml.Marshal(s.cfg) + if err != nil { + return "", err + } + hash := md5.Sum(b) + return hex.EncodeToString(hash[:]), nil } func postPath(pre string, v config.AlertmanagerAPIVersion) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index c71c25fc307..d9b7ba68602 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "reflect" "runtime" @@ -30,10 +31,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -43,6 +43,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" @@ -125,7 +126,11 @@ type QueryEngine interface { // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Log(...interface{}) error + Error(msg string, args ...any) + Info(msg string, args ...any) + Debug(msg string, args ...any) + Warn(msg string, args ...any) + With(args ...any) Close() error } @@ -288,7 +293,7 @@ type QueryTracker interface { // EngineOpts contains configuration options used when creating a new Engine. type EngineOpts struct { - Logger log.Logger + Logger *slog.Logger Reg prometheus.Registerer MaxSamples int Timeout time.Duration @@ -326,7 +331,7 @@ type EngineOpts struct { // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. type Engine struct { - logger log.Logger + logger *slog.Logger metrics *engineMetrics timeout time.Duration maxSamplesPerQuery int @@ -344,7 +349,7 @@ type Engine struct { // NewEngine returns a new engine. 
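The `configHash` helper added above lets `ApplyConfig` recognise an unchanged `AlertmanagerConfig` and carry the already discovered (and dropped) Alertmanagers over to the new set, instead of starting empty and waiting for the next service-discovery update. A rough sketch of just the hashing step, where `fakeAlertmanagerConfig` is a made-up stand-in for the real `config.AlertmanagerConfig`:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	"gopkg.in/yaml.v2"
)

// fakeAlertmanagerConfig stands in for config.AlertmanagerConfig; the real
// type also carries HTTP client settings, relabel rules and SD configs.
type fakeAlertmanagerConfig struct {
	APIVersion string `yaml:"api_version"`
	PathPrefix string `yaml:"path_prefix"`
	Timeout    string `yaml:"timeout"`
}

// configHash mirrors the patch: serialise the config to YAML and hash it,
// so two sets with byte-identical configuration share the same key.
func configHash(cfg interface{}) (string, error) {
	b, err := yaml.Marshal(cfg)
	if err != nil {
		return "", err
	}
	sum := md5.Sum(b)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	a := fakeAlertmanagerConfig{APIVersion: "v2", Timeout: "10s"}
	b := fakeAlertmanagerConfig{APIVersion: "v2", Timeout: "10s"}

	ha, _ := configHash(a)
	hb, _ := configHash(b)
	fmt.Println(ha == hb) // true: identical configs map to the same Alertmanager set
}
```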
func NewEngine(opts EngineOpts) *Engine { if opts.Logger == nil { - opts.Logger = log.NewNopLogger() + opts.Logger = promslog.NewNopLogger() } queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{ @@ -403,7 +408,7 @@ func NewEngine(opts EngineOpts) *Engine { if opts.LookbackDelta == 0 { opts.LookbackDelta = defaultLookbackDelta if l := opts.Logger; l != nil { - level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) + l.Debug("Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) } } @@ -455,7 +460,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // not make reload fail; only log a warning. err := ng.queryLogger.Close() if err != nil { - level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err) + ng.logger.Warn("Error while closing the previous query log file", "err", err) } } @@ -633,23 +638,23 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f := []interface{}{"params", params} + l.With("params", params) if err != nil { - f = append(f, "error", err) + l.With("error", err) } - f = append(f, "stats", stats.NewQueryStats(q.Stats())) + l.With("stats", stats.NewQueryStats(q.Stats())) if span := trace.SpanFromContext(ctx); span != nil { - f = append(f, "spanID", span.SpanContext().SpanID()) + l.With("spanID", span.SpanContext().SpanID()) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - f = append(f, k, v) + l.With(k, v) } } - if err := l.Log(f...); err != nil { - ng.metrics.queryLogFailures.Inc() - level.Error(ng.logger).Log("msg", "can't log query", "err", err) - } + l.Info("promql query logged") + // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? + // ng.metrics.queryLogFailures.Inc() + // ng.logger.Error("can't log query", "err", err) } ng.queryLoggerLock.RUnlock() }() @@ -912,11 +917,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path } if evalRange == 0 { - start -= durationMilliseconds(s.LookbackDelta) + // Reduce the start by one fewer ms than the lookback delta + // because wo want to exclude samples that are precisely the + // lookback delta before the eval time. + start -= durationMilliseconds(s.LookbackDelta) - 1 } else { - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - start -= durationMilliseconds(evalRange) + // For all matrix queries we want to ensure that we have + // (end-start) + range selected this way we have `range` data + // before the start time. We subtract one from the range to + // exclude samples positioned directly at the lower boundary of + // the range. 
+ start -= durationMilliseconds(evalRange) - 1 } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) @@ -1056,7 +1067,7 @@ type evaluator struct { maxSamples int currentSamples int - logger log.Logger + logger *slog.Logger lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 @@ -1087,7 +1098,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + ev.logger.Error("runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -1514,7 +1525,7 @@ func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() val, ws := ev.eval(ctx, subq) - // But do incorporate the peak from the subquery + // But do incorporate the peak from the subquery. samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats mat := val.(Matrix) @@ -1913,20 +1924,20 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, }, e.LHS, e.RHS) default: return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } @@ -1979,7 +1990,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, // Start with the first timestamp after (ev.startTimestamp - offset - range) // that is aligned with the step (multiple of 'newEv.interval'). 
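The selector changes above make both the lookback window and range selectors left-open: a sample sitting exactly `lookbackDelta` (or exactly `range`) before the evaluation time is no longer selected, which is why the window start moves forward by one millisecond. A small worked example of the window arithmetic, using illustrative timestamps rather than anything taken from the engine:

```go
package main

import "fmt"

func main() {
	const (
		evalTime      = int64(600_000) // evaluation time, in ms
		lookbackDelta = int64(300_000) // 5m lookback, in ms
	)

	// Old behaviour: [evalTime-lookbackDelta, evalTime], closed on both ends.
	oldStart := evalTime - lookbackDelta
	// New behaviour: (evalTime-lookbackDelta, evalTime], open on the left,
	// implemented by shifting the start one millisecond forward.
	newStart := evalTime - (lookbackDelta - 1)

	sample := int64(300_000) // a sample exactly lookbackDelta before evalTime
	fmt.Println(sample >= oldStart && sample <= evalTime) // true: previously selected
	fmt.Println(sample >= newStart && sample <= evalTime) // false: now excluded
}
```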
newEv.startTimestamp = newEv.interval * ((ev.startTimestamp - offsetMillis - rangeMillis) / newEv.interval) - if newEv.startTimestamp < (ev.startTimestamp - offsetMillis - rangeMillis) { + if newEv.startTimestamp <= (ev.startTimestamp - offsetMillis - rangeMillis) { newEv.startTimestamp += newEv.interval } @@ -2099,7 +2110,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) for i, s := range vs.Series { it := s.Iterator(nil) - seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { @@ -2161,7 +2172,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, of if valueType == chunkenc.ValNone || t > refTime { var ok bool t, v, h, ok = it.PeekPrev() - if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } } @@ -2295,20 +2306,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. - mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2317,14 +2328,14 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T <= mint; drop++ { } // Rotate the buffer around the drop index so that points before mint can be // reused to store new histograms. @@ -2335,7 +2346,7 @@ func (ev *evaluator) matrixIterSlice( histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { @@ -2359,7 +2370,7 @@ loop: case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. 
- if t >= mintHistograms { + if t > mintHistograms { if histograms == nil { histograms = getMatrixSelectorHPoints() } @@ -2385,7 +2396,7 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintFloats { + if t > mintFloats { ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -2520,7 +2531,7 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } @@ -2594,12 +2605,14 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos) if err != nil { lastErr = err + continue } switch { case returnBool: + histogramValue = nil if keep { floatValue = 1.0 } else { @@ -2703,7 +2716,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. -func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V @@ -2715,9 +2728,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos) if err != nil { lastErr = err + continue } // Catch cases where the scalar is the LHS in a scalar-vector comparison operation. // We want to always keep the vector element value as the output value, even if it's on the RHS. @@ -2782,57 +2796,85 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. 
-func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { - switch op { - case parser.ADD: - if hlhs != nil && hrhs != nil { - res, err := hlhs.Copy().Add(hrhs) - if err != nil { - return 0, nil, false, err +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) { + opName := parser.ItemTypeStr[op] + switch { + case hlhs == nil && hrhs == nil: + { + switch op { + case parser.ADD: + return lhs + rhs, nil, true, nil + case parser.SUB: + return lhs - rhs, nil, true, nil + case parser.MUL: + return lhs * rhs, nil, true, nil + case parser.DIV: + return lhs / rhs, nil, true, nil + case parser.POW: + return math.Pow(lhs, rhs), nil, true, nil + case parser.MOD: + return math.Mod(lhs, rhs), nil, true, nil + case parser.EQLC: + return lhs, nil, lhs == rhs, nil + case parser.NEQ: + return lhs, nil, lhs != rhs, nil + case parser.GTR: + return lhs, nil, lhs > rhs, nil + case parser.LSS: + return lhs, nil, lhs < rhs, nil + case parser.GTE: + return lhs, nil, lhs >= rhs, nil + case parser.LTE: + return lhs, nil, lhs <= rhs, nil + case parser.ATAN2: + return math.Atan2(lhs, rhs), nil, true, nil + } + } + case hlhs == nil && hrhs != nil: + { + switch op { + case parser.MUL: + return 0, hrhs.Copy().Mul(lhs).Compact(0), true, nil + case parser.ADD, parser.SUB, parser.DIV, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", opName, "histogram", pos) } - return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true, nil - case parser.SUB: - if hlhs != nil && hrhs != nil { - res, err := hlhs.Copy().Sub(hrhs) - if err != nil { - return 0, nil, false, err + case hlhs != nil && hrhs == nil: + { + switch op { + case parser.MUL: + return 0, hlhs.Copy().Mul(rhs).Compact(0), true, nil + case parser.DIV: + return 0, hlhs.Copy().Div(rhs).Compact(0), true, nil + case parser.ADD, parser.SUB, parser.POW, parser.MOD, parser.EQLC, parser.NEQ, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "float", pos) } - return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true, nil - case parser.MUL: - if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Mul(rhs), true, nil - } - if hlhs == nil && hrhs != nil { - return 0, hrhs.Copy().Mul(lhs), true, nil - } - return lhs * rhs, nil, true, nil - case parser.DIV: - if hlhs != nil && hrhs == nil { - return 0, hlhs.Copy().Div(rhs), true, nil + case hlhs != nil && hrhs != nil: + { + switch op { + case parser.ADD: + res, err := hlhs.Copy().Add(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil + case parser.SUB: + res, err := hlhs.Copy().Sub(hrhs) + if err != nil { + return 0, nil, false, err + } + return 0, res.Compact(0), true, nil + case parser.EQLC: + // This operation expects that both histograms are compacted. + return 0, hlhs, hlhs.Equals(hrhs), nil + case parser.NEQ: + // This operation expects that both histograms are compacted. 
+ return 0, hlhs, !hlhs.Equals(hrhs), nil + case parser.MUL, parser.DIV, parser.POW, parser.MOD, parser.GTR, parser.LSS, parser.GTE, parser.LTE, parser.ATAN2: + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", opName, "histogram", pos) + } } - return lhs / rhs, nil, true, nil - case parser.POW: - return math.Pow(lhs, rhs), nil, true, nil - case parser.MOD: - return math.Mod(lhs, rhs), nil, true, nil - case parser.EQLC: - return lhs, nil, lhs == rhs, nil - case parser.NEQ: - return lhs, nil, lhs != rhs, nil - case parser.GTR: - return lhs, nil, lhs > rhs, nil - case parser.LSS: - return lhs, nil, lhs < rhs, nil - case parser.GTE: - return lhs, nil, lhs >= rhs, nil - case parser.LTE: - return lhs, nil, lhs <= rhs, nil - case parser.ATAN2: - return math.Atan2(lhs, rhs), nil, true, nil } panic(fmt.Errorf("operator %q not allowed for operations between Vectors", op)) } @@ -2890,12 +2932,38 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatValue = 0 + switch { + case h != nil: + // Ignore histograms for STDVAR and STDDEV. + group.seen = false + if op == parser.STDVAR { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange())) + } + case math.IsNaN(f), math.IsInf(f, 0): + group.floatValue = math.NaN() + default: + group.floatValue = 0 + } case parser.QUANTILE: + if h != nil { + group.seen = false + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange())) + } group.heap = make(vectorByValueHeap, 1) group.heap[0] = Sample{F: f} case parser.GROUP: group.floatValue = 1 + case parser.MIN, parser.MAX: + if h != nil { + group.seen = false + if op == parser.MIN { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + } + } } continue } @@ -2994,11 +3062,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Do nothing. Required to avoid the panic in `default:` below. 
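The rewritten `vectorElemBinop` above dispatches on which side of the operator carries a native histogram and reports unsupported float/histogram combinations as an info annotation rather than silently returning a meaningless float. The following is a deliberately simplified sketch of that dispatch shape only; `hist`, `mul` and the sentinel error are toy stand-ins, not the real `FloatHistogram` type or annotation constructors.

```go
package main

import (
	"errors"
	"fmt"
)

// hist is a toy stand-in for histogram.FloatHistogram.
type hist struct{ sum float64 }

func (h *hist) mul(f float64) *hist { return &hist{sum: h.sum * f} }

var errIncompatibleTypes = errors.New("incompatible sample types in binary operation")

// elemBinop sketches the four-way dispatch: float-float, float-histogram,
// histogram-float and histogram-histogram operands.
func elemBinop(op string, lf, rf float64, lh, rh *hist) (float64, *hist, error) {
	switch {
	case lh == nil && rh == nil: // float op float: all operators apply
		switch op {
		case "+":
			return lf + rf, nil, nil
		case "*":
			return lf * rf, nil, nil
		}
	case lh == nil && rh != nil: // float op histogram: only scaling is defined
		if op == "*" {
			return 0, rh.mul(lf), nil
		}
		return 0, nil, errIncompatibleTypes
	case lh != nil && rh == nil: // histogram op float
		if op == "*" {
			return 0, lh.mul(rf), nil
		}
		return 0, nil, errIncompatibleTypes
	default: // histogram op histogram: addition (and subtraction/equality) only
		if op == "+" {
			return 0, &hist{sum: lh.sum + rh.sum}, nil
		}
		return 0, nil, errIncompatibleTypes
	}
	return 0, nil, fmt.Errorf("operator %q not supported in this sketch", op)
}

func main() {
	_, _, err := elemBinop("+", 1, 0, nil, &hist{sum: 2}) // float + histogram
	fmt.Println(err) // incompatible sample types in binary operation
}
```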
case parser.MAX: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("max", e.Expr.PositionRange())) + continue + } if group.floatValue < f || math.IsNaN(group.floatValue) { group.floatValue = f } case parser.MIN: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("min", e.Expr.PositionRange())) + continue + } if group.floatValue > f || math.IsNaN(group.floatValue) { group.floatValue = f } @@ -3012,9 +3088,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix delta := f - group.floatMean group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) + } else { + if op == parser.STDVAR { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stdvar", e.Expr.PositionRange())) + } else { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", e.Expr.PositionRange())) + } } case parser.QUANTILE: + if h != nil { + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("quantile", e.Expr.PositionRange())) + continue + } group.heap = append(group.heap, Sample{F: f}) default: @@ -3351,6 +3437,9 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat } metricName := "" pos := e.PositionRange() + if errors.Is(err, annotations.PromQLInfo) || errors.Is(err, annotations.PromQLWarning) { + return annotations.New().Add(err) + } if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index a509f783faf..f9af4fbe092 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -350,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. @@ -533,6 +533,10 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper toNearestInverse := 1.0 / toNearest for _, el := range vec { + if el.H != nil { + // Process only float samples. 
+ continue + } f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse if !enh.enableDelayedNameRemoval { el.Metric = el.Metric.DropMetricName() @@ -1465,7 +1469,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio regexStr = stringFromArg(args[4]) ) - regex, err := regexp.Compile("^(?:" + regexStr + ")$") + regex, err := regexp.Compile("^(?s:" + regexStr + ")$") if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } @@ -1499,11 +1503,6 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio return matrix, ws } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // === Vector(s Scalar) (Vector, Annotations) === func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, @@ -1555,11 +1554,6 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) return matrix, ws } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // Common code for date related functions. func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { if len(vals) == 0 { @@ -1642,83 +1636,83 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // FunctionCalls is a list of all functions supported by PromQL, including their types. 
var FunctionCalls = map[string]FunctionCall{ - "abs": funcAbs, - "absent": funcAbsent, - "absent_over_time": funcAbsentOverTime, - "acos": funcAcos, - "acosh": funcAcosh, - "asin": funcAsin, - "asinh": funcAsinh, - "atan": funcAtan, - "atanh": funcAtanh, - "avg_over_time": funcAvgOverTime, - "ceil": funcCeil, - "changes": funcChanges, - "clamp": funcClamp, - "clamp_max": funcClampMax, - "clamp_min": funcClampMin, - "cos": funcCos, - "cosh": funcCosh, - "count_over_time": funcCountOverTime, - "days_in_month": funcDaysInMonth, - "day_of_month": funcDayOfMonth, - "day_of_week": funcDayOfWeek, - "day_of_year": funcDayOfYear, - "deg": funcDeg, - "delta": funcDelta, - "deriv": funcDeriv, - "exp": funcExp, - "floor": funcFloor, - "histogram_avg": funcHistogramAvg, - "histogram_count": funcHistogramCount, - "histogram_fraction": funcHistogramFraction, - "histogram_quantile": funcHistogramQuantile, - "histogram_sum": funcHistogramSum, - "histogram_stddev": funcHistogramStdDev, - "histogram_stdvar": funcHistogramStdVar, - "holt_winters": funcHoltWinters, - "hour": funcHour, - "idelta": funcIdelta, - "increase": funcIncrease, - "info": nil, - "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, - "ln": funcLn, - "log10": funcLog10, - "log2": funcLog2, - "last_over_time": funcLastOverTime, - "mad_over_time": funcMadOverTime, - "max_over_time": funcMaxOverTime, - "min_over_time": funcMinOverTime, - "minute": funcMinute, - "month": funcMonth, - "pi": funcPi, - "predict_linear": funcPredictLinear, - "present_over_time": funcPresentOverTime, - "quantile_over_time": funcQuantileOverTime, - "rad": funcRad, - "rate": funcRate, - "resets": funcResets, - "round": funcRound, - "scalar": funcScalar, - "sgn": funcSgn, - "sin": funcSin, - "sinh": funcSinh, - "sort": funcSort, - "sort_desc": funcSortDesc, - "sort_by_label": funcSortByLabel, - "sort_by_label_desc": funcSortByLabelDesc, - "sqrt": funcSqrt, - "stddev_over_time": funcStddevOverTime, - "stdvar_over_time": funcStdvarOverTime, - "sum_over_time": funcSumOverTime, - "tan": funcTan, - "tanh": funcTanh, - "time": funcTime, - "timestamp": funcTimestamp, - "vector": funcVector, - "year": funcYear, + "abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "acos": funcAcos, + "acosh": funcAcosh, + "asin": funcAsin, + "asinh": funcAsinh, + "atan": funcAtan, + "atanh": funcAtanh, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp": funcClamp, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "cos": funcCos, + "cosh": funcCosh, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "day_of_year": funcDayOfYear, + "deg": funcDeg, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_avg": funcHistogramAvg, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, + "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, + "double_exponential_smoothing": funcDoubleExponentialSmoothing, + "hour": funcHour, + "idelta": funcIdelta, + "increase": funcIncrease, + "info": nil, + "irate": funcIrate, + "label_replace": nil, // evalLabelReplace not called via this map. + "label_join": nil, // evalLabelJoin not called via this map. 
+ "ln": funcLn, + "log10": funcLog10, + "log2": funcLog2, + "last_over_time": funcLastOverTime, + "mad_over_time": funcMadOverTime, + "max_over_time": funcMaxOverTime, + "min_over_time": funcMinOverTime, + "minute": funcMinute, + "month": funcMonth, + "pi": funcPi, + "predict_linear": funcPredictLinear, + "present_over_time": funcPresentOverTime, + "quantile_over_time": funcQuantileOverTime, + "rad": funcRad, + "rate": funcRate, + "resets": funcResets, + "round": funcRound, + "scalar": funcScalar, + "sgn": funcSgn, + "sin": funcSin, + "sinh": funcSinh, + "sort": funcSort, + "sort_desc": funcSortDesc, + "sort_by_label": funcSortByLabel, + "sort_by_label_desc": funcSortByLabelDesc, + "sqrt": funcSqrt, + "stddev_over_time": funcStddevOverTime, + "stdvar_over_time": funcStdvarOverTime, + "sum_over_time": funcSumOverTime, + "tan": funcTan, + "tanh": funcTanh, + "time": funcTime, + "timestamp": funcTimestamp, + "vector": funcVector, + "year": funcYear, } // AtModifierUnsafeFunctions are the functions whose result diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go index 5f08e6a72c9..759055fb0d9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/fuzz.go +++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go @@ -61,17 +61,13 @@ const ( var symbolTable = labels.NewSymbolTable() func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, false, symbolTable) - if warning != nil { + p, warning := textparse.New(in, contentType, "", false, false, symbolTable) + if p == nil || warning != nil { // An invalid content type is being passed, which should not happen // in this context. panic(warning) } - if contentType == "application/openmetrics-text" { - p = textparse.NewOpenMetricsParser(in, symbolTable) - } - var err error for { _, err = p.Next() @@ -95,7 +91,7 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int { // Note that this is not the parser for the text-based exposition-format; that // lives in github.com/prometheus/client_golang/text. 
func FuzzParseMetric(in []byte) int { - return fuzzParseMetricWithContentType(in, "") + return fuzzParseMetricWithContentType(in, "text/plain") } func FuzzParseOpenMetric(in []byte) int { diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index 434d3cdc1c7..aa65aca2755 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -202,10 +202,11 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, ReturnType: ValueTypeVector, }, - "holt_winters": { - Name: "holt_winters", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, - ReturnType: ValueTypeVector, + "double_exponential_smoothing": { + Name: "double_exponential_smoothing", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Experimental: true, }, "hour": { Name: "hour", diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go index f0649a77a82..e078bcb60bb 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/almost" + "github.com/prometheus/prometheus/util/convertnhcb" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -46,8 +47,8 @@ import ( var ( patSpace = regexp.MustCompile("[\t ]+") patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) ) const ( @@ -321,6 +322,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { cmd.fail = true case "warn": cmd.warn = true + case "info": + cmd.info = true } for j := 1; i+1 < len(lines); j++ { @@ -477,43 +480,22 @@ func (cmd *loadCmd) append(a storage.Appender) error { return nil } -func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) { - mName := m.Get(labels.MetricName) - baseM := labels.NewBuilder(m). - Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). - Del(labels.BucketLabel). 
- Labels() - hash := baseM.Hash() - return baseM, hash -} - type tempHistogramWrapper struct { metric labels.Labels upperBounds []float64 - histogramByTs map[int64]tempHistogram + histogramByTs map[int64]convertnhcb.TempHistogram } func newTempHistogramWrapper() tempHistogramWrapper { return tempHistogramWrapper{ upperBounds: []float64{}, - histogramByTs: map[int64]tempHistogram{}, + histogramByTs: map[int64]convertnhcb.TempHistogram{}, } } -type tempHistogram struct { - bucketCounts map[float64]float64 - count float64 - sum float64 -} - -func newTempHistogram() tempHistogram { - return tempHistogram{ - bucketCounts: map[float64]float64{}, - } -} - -func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) { - m2, m2hash := getHistogramMetricBase(m, suffix) +func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*convertnhcb.TempHistogram, float64)) { + m2 := convertnhcb.GetHistogramMetricBase(m, suffix) + m2hash := m2.Hash() histogramWrapper, exists := histogramMap[m2hash] if !exists { histogramWrapper = newTempHistogramWrapper() @@ -528,7 +510,7 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap } histogram, exists := histogramWrapper.histogramByTs[s.T] if !exists { - histogram = newTempHistogram() + histogram = convertnhcb.NewTempHistogram() } updateHistogram(&histogram, s.F) histogramWrapper.histogramByTs[s.T] = histogram @@ -536,34 +518,6 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap histogramMap[m2hash] = histogramWrapper } -func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) { - sort.Float64s(upperBounds0) - upperBounds := make([]float64, 0, len(upperBounds0)) - prevLE := math.Inf(-1) - for _, le := range upperBounds0 { - if le != prevLE { // deduplicate - upperBounds = append(upperBounds, le) - prevLE = le - } - } - var customBounds []float64 - if upperBounds[len(upperBounds)-1] == math.Inf(1) { - customBounds = upperBounds[:len(upperBounds)-1] - } else { - customBounds = upperBounds - } - return upperBounds, &histogram.FloatHistogram{ - Count: 0, - Sum: 0, - Schema: histogram.CustomBucketsSchema, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: uint32(len(upperBounds))}, - }, - PositiveBuckets: make([]float64, len(upperBounds)), - CustomValues: customBounds, - } -} - // If classic histograms are defined, convert them into native histograms with custom // bounds and append the defined time series to the storage. 
func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { @@ -582,16 +536,16 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { } processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) { histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le) - }, func(histogram *tempHistogram, f float64) { - histogram.bucketCounts[le] = f + }, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.BucketCounts[le] = f }) case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { - histogram.count = f + processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.Count = f }) case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { - histogram.sum = f + processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.Sum = f }) } } @@ -599,30 +553,21 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { // Convert the collated classic histogram data into native histograms // with custom bounds and append them to the storage. for _, histogramWrapper := range histogramMap { - upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds) + upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds, true) + fhBase := hBase.ToFloat(nil) samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs)) for t, histogram := range histogramWrapper.histogramByTs { - fh := fhBase.Copy() - var prevCount, total float64 - for i, le := range upperBounds { - currCount, exists := histogram.bucketCounts[le] - if !exists { - currCount = 0 + h, fh := convertnhcb.NewHistogram(histogram, upperBounds, hBase, fhBase) + if fh == nil { + if err := h.Validate(); err != nil { + return err } - count := currCount - prevCount - fh.PositiveBuckets[i] = count - total += count - prevCount = currCount - } - fh.Sum = histogram.sum - if histogram.count != 0 { - total = histogram.count + fh = h.ToFloat(nil) } - fh.Count = total - s := promql.Sample{T: t, H: fh.Compact(0)} - if err := s.H.Validate(); err != nil { + if err := fh.Validate(); err != nil { return err } + s := promql.Sample{T: t, H: fh} samples = append(samples, s) } sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T }) @@ -657,10 +602,10 @@ type evalCmd struct { step time.Duration line int - isRange bool // if false, instant query - fail, warn, ordered bool - expectedFailMessage string - expectedFailRegexp *regexp.Regexp + isRange bool // if false, instant query + fail, warn, ordered, info bool + expectedFailMessage string + expectedFailRegexp *regexp.Regexp metrics map[uint64]labels.Labels expectScalar bool @@ -1208,13 +1153,16 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, _ := res.Warnings.CountWarningsAndInfo() + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() if !cmd.warn && countWarnings > 0 { return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) } if 
cmd.warn && countWarnings == 0 { return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + if cmd.info && countInfo == 0 { + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test index 68d2e735b37..19a896a6fbb 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test @@ -229,13 +229,28 @@ load 5m http_requests{job="api-server", instance="0", group="canary"} NaN http_requests{job="api-server", instance="1", group="canary"} 3 http_requests{job="api-server", instance="2", group="canary"} 4 + http_requests_histogram{job="api-server", instance="3", group="canary"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} eval instant at 0m max(http_requests) {} 4 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m max({job="api-server"}) + {} 4 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 0m max(http_requests_histogram) + eval instant at 0m min(http_requests) {} 1 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m min({job="api-server"}) + {} 1 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 0m min(http_requests_histogram) + eval instant at 0m max by (group) (http_requests) {group="production"} 2 {group="canary"} 4 @@ -250,7 +265,7 @@ clear load 5m http_requests{job="api-server", instance="0", group="production"} 0+10x10 http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 http_requests{job="app-server", instance="0", group="production"} 0+50x10 @@ -337,32 +352,32 @@ load 5m version{job="app-server", instance="0", group="canary"} 7 version{job="app-server", instance="1", group="canary"} 7 -eval instant at 5m count_values("version", version) +eval instant at 1m count_values("version", version) {version="6"} 5 {version="7"} 2 {version="8"} 2 -eval instant at 5m count_values(((("version"))), version) +eval instant at 1m count_values(((("version"))), version) {version="6"} 5 {version="7"} 2 {version="8"} 2 -eval instant at 5m count_values without (instance)("version", version) +eval instant at 1m count_values without (instance)("version", version) {job="api-server", group="production", version="6"} 3 {job="api-server", group="canary", version="8"} 2 {job="app-server", group="production", version="6"} 2 {job="app-server", group="canary", version="7"} 2 # Overwrite label with output. Don't do this. 
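The `eval_info` lines in the aggregators test data in this diff are recognised through the widened `patEvalInstant`/`patEvalRange` patterns and the new `cmd.info` flag added to promqltest above. Below is a small standalone sketch of how such a line is split up, reusing the updated pattern with one of the eval lines from the test data; the `main` scaffolding around it is illustrative only.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same shape as the updated patEvalInstant above, shown standalone here.
	pat := regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)

	m := pat.FindStringSubmatch("eval_info instant at 0m max(http_requests_histogram)")
	fmt.Println(m[1]) // "info": the harness then requires at least one info annotation
	fmt.Println(m[2]) // "0m": the evaluation timestamp
	fmt.Println(m[3]) // "max(http_requests_histogram)": the expression under test
}
```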
-eval instant at 5m count_values without (instance)("job", version) +eval instant at 1m count_values without (instance)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 # Overwrite label with output. Don't do this. -eval instant at 5m count_values by (job, group)("job", version) +eval instant at 1m count_values by (job, group)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 @@ -380,6 +395,7 @@ load 10s data{test="uneven samples",point="a"} 0 data{test="uneven samples",point="b"} 1 data{test="uneven samples",point="c"} 4 + data_histogram{test="histogram sample", point="c"} {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}} foo .8 eval instant at 1m quantile without(point)(0.8, data) @@ -387,6 +403,15 @@ eval instant at 1m quantile without(point)(0.8, data) {test="three samples"} 1.6 {test="uneven samples"} 2.8 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 1m quantile without(point)(0.8, {__name__=~"data(_histogram)?"}) + {test="two samples"} 0.8 + {test="three samples"} 1.6 + {test="uneven samples"} 2.8 + +# The histogram is ignored here so there is no result but it has an info annotation now. +eval_info instant at 1m quantile(0.8, data_histogram) + # Bug #5276. eval instant at 1m quantile without(point)(scalar(foo), data) {test="two samples"} 0.8 @@ -572,3 +597,166 @@ clear # #eval instant at 1m count(topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without())) # {} 1 + +clear + +# Test stddev produces consistent results regardless the order the data is loaded in. +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 0m stddev(series) + {} 0.5 + +eval_info instant at 0m stdvar(series) + {} 0.25 + +# The histogram is ignored here so there is no result but it has an info annotation now. 
+eval_info instant at 0m stddev({label="c"}) + +eval_info instant at 0m stdvar({label="c"}) + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + +clear + +load 5m + series{label="a"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} 0.5 + +eval instant at 0m stdvar(series) + {} 0.25 + +eval instant at 0m stddev by (label) (series) + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} NaN + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} inf + +eval instant at 0m stddev (series) + {} NaN + +eval instant at 0m stdvar (series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} inf + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series inf + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test index 35f90ee6714..4091f7eabf2 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test @@ -121,45 +121,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100) # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. # Inner most sum=1+2+...+10=55. -# With [100s:25s] subquery, it's 55*5. +# With [100s:25s] subquery, it's 55*4. eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50) - {job="1"} 275 + {job="1"} 220 # Nested subqueries with different timestamps on both. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. -# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times. +# Sum of innermost subquery is 220 as above. 
The outer subquery repeats it 3 times. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000) - {job="1"} 1100 + {job="1"} 660 # Testing the inner subquery timestamp since vector selector does not have @. # Inner sum for subquery [100s:25s] @ 50 are -# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9. -# This sum of 11 is repeated 4 times by outer subquery. +# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5. +# This sum of 7 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200) - {job="1"} 44 + {job="1"} 21 # Inner sum for subquery [100s:25s] @ 200 are -# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20. -# This sum of 116 is repeated 4 times by outer subquery. +# at 125=12, at 150=15, at 175=17, at 200=20. +# This sum of 64 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50) - {job="1"} 464 + {job="1"} 192 # Nested subqueries with timestamp only on outer subquery. # Outer most subquery: -# at 900=783 -# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87 -# at 925=537 -# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91 -# at 950=828 -# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92 -# at 975=567 -# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95 -# at 1000=873 -# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97 +# at 925=360 +# inner subquery: at 905=90+89, at 915=91+90 +# at 950=372 +# inner subquery: at 930=93+92, at 940=94+93 +# at 975=380 +# inner subquery: at 955=95+94, at 965=96+95 +# at 1000=392 +# inner subquery: at 980=98+97, at 990=99+98 eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000) - {job="1"} 3588 + {job="1"} 1504 # minute is counted on the value of the sample. eval instant at 10s minute(metric @ 1500) @@ -182,32 +180,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10)) # minute is counted on the value of the sample. eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s]) - {job="1"} 22 - {job="2"} 55 + {job="1"} 20 + {job="2"} 50 # If nothing passed, minute() takes eval time. # Here the eval time is determined by the subquery. # [50m:1m] at 6000, i.e. 100m, is 50m to 100m. -# sum=50+51+52+...+59+0+1+2+...+40. +# sum=51+52+...+59+0+1+2+...+40. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000) - {} 1365 + {} 1315 -# sum=45+46+47+...+59+0+1+2+...+35. +# sum=46+47+...+59+0+1+2+...+35. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m) - {} 1410 + {} 1365 # time() is the eval time which is determined by subquery here. -# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2. +# 2901+...+3000 = (3000*3001 - 2899*2900)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000) - {} 297950 + {} 295050 -# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2. +# 2301+...+2400 = (2400*2401 - 2299*2300)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s) - {} 237350 + {} 235050 # timestamp() takes the time of the sample and not the evaluation time. 
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000) - {job="1"} 110 + {job="1"} 100 # The result of inner timestamp() will have the timestamp as the # eval time, hence entire expression is not step invariant and depends on eval time. diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index 6e2b3630bcb..fb1d1696244 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -6,11 +6,13 @@ load 5m # Tests for resets(). eval instant at 50m resets(http_requests[5m]) + +eval instant at 50m resets(http_requests[10m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 -eval instant at 50m resets(http_requests[300]) +eval instant at 50m resets(http_requests[600]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 @@ -21,6 +23,11 @@ eval instant at 50m resets(http_requests[20m]) {path="/biz"} 0 eval instant at 50m resets(http_requests[30m]) + {path="/foo"} 1 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m resets(http_requests[32m]) {path="/foo"} 2 {path="/bar"} 1 {path="/biz"} 0 @@ -34,28 +41,30 @@ eval instant at 50m resets(nonexistent_metric[50m]) # Tests for changes(). eval instant at 50m changes(http_requests[5m]) + +eval instant at 50m changes(http_requests[6m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 eval instant at 50m changes(http_requests[20m]) - {path="/foo"} 3 - {path="/bar"} 3 + {path="/foo"} 2 + {path="/bar"} 2 {path="/biz"} 0 eval instant at 50m changes(http_requests[30m]) - {path="/foo"} 4 - {path="/bar"} 5 - {path="/biz"} 1 + {path="/foo"} 3 + {path="/bar"} 4 + {path="/biz"} 0 eval instant at 50m changes(http_requests[50m]) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes((http_requests[50m])) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes(nonexistent_metric[50m]) @@ -66,7 +75,7 @@ load 5m x{a="b"} NaN NaN NaN x{a="c"} 0 NaN 0 -eval instant at 15m changes(x[15m]) +eval instant at 15m changes(x[20m]) {a="b"} 0 {a="c"} 2 @@ -75,14 +84,14 @@ clear # Tests for increase(). load 5m http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests{path="/bar"} 0+18x5 0+18x5 http_requests{path="/dings"} 10+10x10 http_requests{path="/bumms"} 1+10x10 # Tests for increase(). eval instant at 50m increase(http_requests[50m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 160 {path="/dings"} 100 {path="/bumms"} 100 @@ -95,7 +104,7 @@ eval instant at 50m increase(http_requests[50m]) # value, and therefore the extrapolation happens only by 30s. eval instant at 50m increase(http_requests[100m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 162 {path="/dings"} 105 {path="/bumms"} 101 @@ -115,15 +124,17 @@ clear # Tests for rate(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 + testcounter_reset_middle 0+27x4 0+27x5 testcounter_reset_end 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). eval instant at 50m rate(testcounter_reset_middle[50m]) - {} 0.03 + {} 0.08 # Counter resets at end of range are ignored by rate(). 
eval instant at 50m rate(testcounter_reset_end[5m]) + +eval instant at 50m rate(testcounter_reset_end[6m]) {} 0 clear @@ -242,24 +253,24 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) - {} 76.81818181818181 + {} 70 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) - {} 76.81818181818181 + {} 70 # intercept at t = 3000+3600 = 6600 -eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h) +eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) {} 76.81818181818181 # intercept at t = 600+3600 = 4200 -eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 -eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 89.54545454545455 # With http_requests, there is a sample value exactly at the end of @@ -467,7 +478,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -502,7 +513,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -640,7 +651,7 @@ eval_ordered instant at 50m sort_by_label(node_uname_info, "release") node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -# Tests for holt_winters +# Tests for double_exponential_smoothing clear # positive trends @@ -650,7 +661,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 8000 {job="api-server", instance="1", group="production"} 16000 {job="api-server", instance="0", 
group="canary"} 24000 @@ -664,7 +675,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000 http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 0 {job="api-server", instance="1", group="production"} -16000 {job="api-server", instance="0", group="canary"} 24000 @@ -688,10 +699,10 @@ load 10s metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 metric10 -9.988465674311579e+307 9.988465674311579e+307 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 55s avg_over_time(metric[1m]) {} 3 -eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m]) +eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m]) {} 3 eval instant at 1m avg_over_time(metric2[1m]) @@ -758,8 +769,8 @@ eval instant at 1m avg_over_time(metric8[1m]) {} 9.988465674311579e+307 # This overflows float64. -eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m]) - {} Inf +eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m]) + {} +Inf eval instant at 1m avg_over_time(metric9[1m]) {} -9.988465674311579e+307 @@ -768,10 +779,16 @@ eval instant at 1m avg_over_time(metric9[1m]) eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m]) {} -Inf -eval instant at 1m avg_over_time(metric10[1m]) +eval instant at 45s avg_over_time(metric10[1m]) + {} 0 + +eval instant at 1m avg_over_time(metric10[2m]) {} 0 -eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m]) +eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m]) + {} 0 + +eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m]) {} 0 # Test if very big intermediate values cause loss of detail. @@ -779,10 +796,10 @@ clear load 10s metric 1 1e100 1 -1e100 -eval instant at 1m sum_over_time(metric[1m]) +eval instant at 1m sum_over_time(metric[2m]) {} 2 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 1m avg_over_time(metric[2m]) {} 0.5 # Tests for stddev_over_time and stdvar_over_time. @@ -790,13 +807,13 @@ clear load 10s metric 0 8 8 2 3 -eval instant at 1m stdvar_over_time(metric[1m]) +eval instant at 1m stdvar_over_time(metric[2m]) {} 10.56 -eval instant at 1m stddev_over_time(metric[1m]) +eval instant at 1m stddev_over_time(metric[2m]) {} 3.249615 -eval instant at 1m stddev_over_time((metric[1m])) +eval instant at 1m stddev_over_time((metric[2m])) {} 3.249615 # Tests for stddev_over_time and stdvar_over_time #4927. 
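Most of the selector and timestamp adjustments in these test files (`[5m]` widened to `[10m]` for 5m-spaced series, `[1m]` to `[2m]` for 10s-spaced series, and the reworked subquery sums above) compensate for the left boundary of range and lookback selection becoming exclusive. As a minimal Go sketch of the counting difference, using hypothetical 60s-spaced samples rather than any series from these tests:

```go
package main

import "fmt"

func main() {
	// Hypothetical series: one sample every 60s from t=0s to t=600s.
	var samples []int
	for ts := 0; ts <= 600; ts += 60 {
		samples = append(samples, ts)
	}

	evalT, rng := 300, 300 // instant query at t=300s over a 5m range

	closed, halfOpen := 0, 0
	for _, ts := range samples {
		if ts >= evalT-rng && ts <= evalT { // previous behaviour: [t-5m, t]
			closed++
		}
		if ts > evalT-rng && ts <= evalT { // new behaviour: (t-5m, t]
			halfOpen++
		}
	}

	// The sample sitting exactly at t-5m is no longer selected, so the same
	// range now yields one fewer point; widening the selector keeps the old
	// sample set in range.
	fmt.Println(closed, halfOpen) // 6 5
}
```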
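Relatedly, the `evalLabelReplace` hunk earlier in this diff wraps the user-supplied pattern in `^(?s:...)$` instead of `^(?:...)$`, so `.` inside the pattern also matches newlines. A small illustrative sketch with a hypothetical label value, not taken from the code above:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical label value containing a newline.
	value := "front\nback"
	pattern := "front.*back"

	withoutS := regexp.MustCompile("^(?:" + pattern + ")$") // '.' does not match '\n'
	withS := regexp.MustCompile("^(?s:" + pattern + ")$")   // '.' also matches '\n'

	fmt.Println(withoutS.MatchString(value)) // false
	fmt.Println(withS.MatchString(value))    // true
}
```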
@@ -826,42 +843,42 @@ load 10s data{test="three samples"} 0 1 2 data{test="uneven samples"} 0 1 4 -eval instant at 1m quantile_over_time(0, data[1m]) +eval instant at 1m quantile_over_time(0, data[2m]) {test="two samples"} 0 {test="three samples"} 0 {test="uneven samples"} 0 -eval instant at 1m quantile_over_time(0.5, data[1m]) +eval instant at 1m quantile_over_time(0.5, data[2m]) {test="two samples"} 0.5 {test="three samples"} 1 {test="uneven samples"} 1 -eval instant at 1m quantile_over_time(0.75, data[1m]) +eval instant at 1m quantile_over_time(0.75, data[2m]) {test="two samples"} 0.75 {test="three samples"} 1.5 {test="uneven samples"} 2.5 -eval instant at 1m quantile_over_time(0.8, data[1m]) +eval instant at 1m quantile_over_time(0.8, data[2m]) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 -eval instant at 1m quantile_over_time(1, data[1m]) +eval instant at 1m quantile_over_time(1, data[2m]) {test="two samples"} 1 {test="three samples"} 2 {test="uneven samples"} 4 -eval_warn instant at 1m quantile_over_time(-1, data[1m]) +eval_warn instant at 1m quantile_over_time(-1, data[2m]) {test="two samples"} -Inf {test="three samples"} -Inf {test="uneven samples"} -Inf -eval_warn instant at 1m quantile_over_time(2, data[1m]) +eval_warn instant at 1m quantile_over_time(2, data[2m]) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf -eval_warn instant at 1m (quantile_over_time(2, (data[1m]))) +eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf @@ -969,21 +986,21 @@ load 10s data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN -eval instant at 1m min_over_time(data[1m]) +eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 {type="some_nan"} 0 {type="some_nan2"} 1 {type="some_nan3"} 0 {type="only_nan"} NaN -eval instant at 1m max_over_time(data[1m]) +eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 {type="some_nan2"} 2 {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[1m]) +eval instant at 1m last_over_time(data[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 @@ -1076,13 +1093,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s]) {} 1 eval instant at 15m absent_over_time(http_requests[5m]) - -eval instant at 16m absent_over_time(http_requests[5m]) {} 1 +eval instant at 15m absent_over_time(http_requests[10m]) + eval instant at 16m absent_over_time(http_requests[6m]) + {} 1 + +eval instant at 16m absent_over_time(http_requests[16m]) eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) + {} 1 + +eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m]) eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m]) @@ -1138,17 +1161,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) eval instant at 15m present_over_time(http_requests[5m]) + +eval instant at 15m present_over_time(http_requests[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[5m]) - eval instant at 16m present_over_time(http_requests[6m]) + +eval instant at 16m present_over_time(http_requests[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 eval instant at 16m 
present_over_time(httpd_handshake_failures_total[1m]) - {instance="127.0.0.1", job="node"} 1 eval instant at 16m present_over_time({instance="127.0.0.1"}[5m]) {instance="127.0.0.1",job="node"} 1 @@ -1169,59 +1193,59 @@ load 5m exp_root_log{l="x"} 10 exp_root_log{l="y"} 20 -eval instant at 5m exp(exp_root_log) +eval instant at 1m exp(exp_root_log) {l="x"} 22026.465794806718 {l="y"} 485165195.4097903 -eval instant at 5m exp(exp_root_log - 10) +eval instant at 1m exp(exp_root_log - 10) {l="y"} 22026.465794806718 {l="x"} 1 -eval instant at 5m exp(exp_root_log - 20) +eval instant at 1m exp(exp_root_log - 20) {l="x"} 4.5399929762484854e-05 {l="y"} 1 -eval instant at 5m ln(exp_root_log) +eval instant at 1m ln(exp_root_log) {l="x"} 2.302585092994046 {l="y"} 2.995732273553991 -eval instant at 5m ln(exp_root_log - 10) +eval instant at 1m ln(exp_root_log - 10) {l="y"} 2.302585092994046 {l="x"} -Inf -eval instant at 5m ln(exp_root_log - 20) +eval instant at 1m ln(exp_root_log - 20) {l="y"} -Inf {l="x"} NaN -eval instant at 5m exp(ln(exp_root_log)) +eval instant at 1m exp(ln(exp_root_log)) {l="y"} 20 {l="x"} 10 -eval instant at 5m sqrt(exp_root_log) +eval instant at 1m sqrt(exp_root_log) {l="x"} 3.1622776601683795 {l="y"} 4.47213595499958 -eval instant at 5m log2(exp_root_log) +eval instant at 1m log2(exp_root_log) {l="x"} 3.3219280948873626 {l="y"} 4.321928094887363 -eval instant at 5m log2(exp_root_log - 10) +eval instant at 1m log2(exp_root_log - 10) {l="y"} 3.3219280948873626 {l="x"} -Inf -eval instant at 5m log2(exp_root_log - 20) +eval instant at 1m log2(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf -eval instant at 5m log10(exp_root_log) +eval instant at 1m log10(exp_root_log) {l="x"} 1 {l="y"} 1.301029995663981 -eval instant at 5m log10(exp_root_log - 10) +eval instant at 1m log10(exp_root_log - 10) {l="y"} 1 {l="x"} -Inf -eval instant at 5m log10(exp_root_log - 20) +eval instant at 1m log10(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf @@ -1234,3 +1258,12 @@ load 1m # We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. 
eval range from 0 to 61s step 1s timestamp(metric) {} 0x59 60 60 + +clear + +# Check round with mixed data types +load 1m + mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} + +eval range from 0 to 5m step 1m round(mixed_metric) + {} _ 1 2 3 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test index 47cba799352..6089fd01d20 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test @@ -108,8 +108,8 @@ eval instant at 50m histogram_stdvar(testhistogram3) eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) {start="positive"} 0.6363636363636364 {start="negative"} 0 - -eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) + +eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) {start="positive"} 0.6363636363636364 {start="negative"} 0 @@ -118,8 +118,8 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count {start="positive"} 0.6363636363636364 - -eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m]) + +eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m]) {start="positive"} 0.6363636363636364 # Test histogram_quantile, native and classic. @@ -241,28 +241,27 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="negative"} 0.3 # More realistic with rates. - -eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m])) {start="positive"} 0.72 {start="negative"} 0.3 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) {start="positive"} 0.72 {start="negative"} 0.3 @@ -307,115 +306,112 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) # Aggregated histogram: Everything in one. Note how native histograms # don't require aggregation by le. 
-eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m]))) {} 0.075 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m]))) {} 0.1277777777777778 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m]))) {} 0.075 -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m]))) {} 0.12777777777777778 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. 
- -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job)) {job="job1"} 0.14 {job="job2"} 0.1125 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. - -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. 
- -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -425,6 +421,25 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket eval instant at 50m sum(request_duration_seconds) {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} +eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) + {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} + +eval instant at 50m avg(request_duration_seconds) + {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} + +# To verify the result above, calculate from classic histogram as well. +eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) + {} 25 + +eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) + {} 22.5 + +eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) + {} 15 + +eval instant at 50m count(request_duration_seconds) + {} 4 + # A histogram with nonmonotonic bucket counts. This may happen when recording # rule evaluation or federation races scrape ingestion, causing some buckets # counts to be derived from fewer samples. @@ -448,19 +463,19 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. 
-eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.5, rate(mixed[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN @@ -469,7 +484,7 @@ load_with_nhcb 5m empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 -eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m])) {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set. @@ -508,3 +523,36 @@ eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_buc eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) {} NaN + +load_with_nhcb 1m + histogram_over_time_bucket{le="0"} 0 1 3 9 + histogram_over_time_bucket{le="1"} 2 3 3 9 + histogram_over_time_bucket{le="2"} 3 8 5 10 + histogram_over_time_bucket{le="4"} 3 10 6 18 + +# Test custom buckets with sum_over_time, avg_over_time. +eval instant at 3m sum_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:37 custom_values:[0 1 2 4] buckets:[13 4 9 11]}} + +eval instant at 3m avg_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:9.25 custom_values:[0 1 2 4] buckets:[3.25 1 2.25 2.75]}} + +# Test custom buckets with counter reset +load_with_nhcb 5m + histogram_with_reset_bucket{le="1"} 1 3 9 + histogram_with_reset_bucket{le="2"} 3 3 9 + histogram_with_reset_bucket{le="4"} 8 5 12 + histogram_with_reset_bucket{le="8"} 10 6 18 + histogram_with_reset_sum{} 36 16 61 + +eval instant at 10m increase(histogram_with_reset[15m]) + {} {{schema:-53 count:27 sum:91.5 custom_values:[1 2 4 8] counter_reset_hint:gauge buckets:[13.5 0 4.5 9]}} + +eval instant at 10m resets(histogram_with_reset[15m]) + {} 1 + +eval instant at 10m histogram_count(increase(histogram_with_reset[15m])) + {} 27 + +eval instant at 10m histogram_sum(increase(histogram_with_reset[15m])) + {} 91.5 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test index c8c0eb285f8..d4a2ad257e1 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test @@ -4,85 +4,85 @@ load 5m another_metric{env="1"} 60 120 180 # Does not drop __name__ for vector selector -eval instant at 15m metric{env="1"} +eval instant at 10m metric{env="1"} metric{env="1"} 120 # Drops __name__ for unary operators -eval instant at 15m -metric +eval instant at 10m -metric {env="1"} -120 # Drops __name__ for binary operators -eval instant at 15m metric + another_metric +eval instant at 10m metric + another_metric {env="1"} 
300 # Does not drop __name__ for binary comparison operators -eval instant at 15m metric <= another_metric +eval instant at 10m metric <= another_metric metric{env="1"} 120 # Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 15m metric <= bool another_metric +eval instant at 10m metric <= bool another_metric {env="1"} 1 # Drops __name__ for vector-scalar operations -eval instant at 15m metric * 2 +eval instant at 10m metric * 2 {env="1"} 240 # Drops __name__ for instant-vector functions -eval instant at 15m clamp(metric, 0, 100) +eval instant at 10m clamp(metric, 0, 100) {env="1"} 100 # Drops __name__ for round function -eval instant at 15m round(metric) +eval instant at 10m round(metric) {env="1"} 120 # Drops __name__ for range-vector functions -eval instant at 15m rate(metric{env="1"}[10m]) +eval instant at 10m rate(metric{env="1"}[10m]) {env="1"} 0.2 # Does not drop __name__ for last_over_time function -eval instant at 15m last_over_time(metric{env="1"}[10m]) +eval instant at 10m last_over_time(metric{env="1"}[10m]) metric{env="1"} 120 # Drops name for other _over_time functions -eval instant at 15m max_over_time(metric{env="1"}[10m]) +eval instant at 10m max_over_time(metric{env="1"}[10m]) {env="1"} 120 # Allows relabeling (to-be-dropped) __name__ via label_replace -eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") +eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") {my_name="rate_metric", env="1"} 0.2 {my_name="rate_another_metric", env="1"} 0.2 # Allows preserving __name__ via label_replace -eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") +eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") rate_metric{env="1"} 0.2 rate_another_metric{env="1"} 0.2 # Allows relabeling (to-be-dropped) __name__ via label_join -eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") +eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") {my_name="metric", env="1"} 0.2 {my_name="another_metric", env="1"} 0.2 # Allows preserving __name__ via label_join -eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") +eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") metric_1{env="1"} 0.2 another_metric_1{env="1"} 0.2 # Does not drop metric names fro aggregation operators -eval instant at 15m sum by (__name__, env) (metric{env="1"}) +eval instant at 10m sum by (__name__, env) (metric{env="1"}) metric{env="1"} 120 # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) # This is an accidental side effect of delayed __name__ label dropping -eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m])) +eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) # Aggregation operators aggregate metrics with same labelset and to-be-dropped names # This is an accidental side effect of delayed __name__ label dropping -eval instant at 15m sum(rate({env="1"}[10m])) by (env) +eval instant at 10m sum(rate({env="1"}[10m])) by (env) {env="1"} 0.4 # Aggregationk operators propagate __name__ label dropping information -eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"})) +eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) metric{env="1"} 120 -eval instant at 
15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +eval instant at 10m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) {env="1"} 0.2 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index 7d2eec32cfa..0463384e2e9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -2,55 +2,58 @@ load 5m empty_histogram {{}} -eval instant at 5m empty_histogram +eval instant at 1m empty_histogram {__name__="empty_histogram"} {{}} -eval instant at 5m histogram_count(empty_histogram) +eval instant at 1m histogram_count(empty_histogram) {} 0 -eval instant at 5m histogram_sum(empty_histogram) +eval instant at 1m histogram_sum(empty_histogram) {} 0 -eval instant at 5m histogram_avg(empty_histogram) +eval instant at 1m histogram_avg(empty_histogram) {} NaN -eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram) +eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram) {} NaN -eval instant at 5m histogram_fraction(0, 8, empty_histogram) +eval instant at 1m histogram_fraction(0, 8, empty_histogram) {} NaN - +clear # buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4). load 5m single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}} # histogram_count extracts the count property from the histogram. -eval instant at 5m histogram_count(single_histogram) +eval instant at 1m histogram_count(single_histogram) {} 4 # histogram_sum extracts the sum property from the histogram. -eval instant at 5m histogram_sum(single_histogram) +eval instant at 1m histogram_sum(single_histogram) {} 5 # histogram_avg calculates the average from sum and count properties. -eval instant at 5m histogram_avg(single_histogram) +eval instant at 1m histogram_avg(single_histogram) {} 1.25 # We expect half of the values to fall in the range 1 < x <= 2. -eval instant at 5m histogram_fraction(1, 2, single_histogram) +eval instant at 1m histogram_fraction(1, 2, single_histogram) {} 0.5 # We expect all values to fall in the range 0 < x <= 8. -eval instant at 5m histogram_fraction(0, 8, single_histogram) +eval instant at 1m histogram_fraction(0, 8, single_histogram) {} 1 -# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2. -eval instant at 5m histogram_quantile(0.5, single_histogram) - {} 1.5 - +# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to +# exponential interpolation, i.e. the "midpoint" within range 1 < x <= +# 2 is assumed where the bucket boundary would be if we increased the +# resolution of the histogram by one step. +eval instant at 1m histogram_quantile(0.5, single_histogram) + {} 1.414213562373095 +clear # Repeat the same histogram 10 times. load 5m @@ -68,8 +71,9 @@ eval instant at 5m histogram_avg(multi_histogram) eval instant at 5m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, multi_histogram) - {} 1.5 + {} 1.414213562373095 # Each entry should look the same as the first. 
@@ -85,10 +89,11 @@ eval instant at 50m histogram_avg(multi_histogram) eval instant at 50m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, multi_histogram) - {} 1.5 - + {} 1.414213562373095 +clear # Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket # with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket @@ -109,8 +114,9 @@ eval instant at 5m histogram_avg(incr_histogram) eval instant at 5m histogram_fraction(1, 2, incr_histogram) {} 0.6 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 eval instant at 50m incr_histogram @@ -129,18 +135,20 @@ eval instant at 50m histogram_avg(incr_histogram) eval instant at 50m histogram_fraction(1, 2, incr_histogram) {} 0.8571428571428571 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 # Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. -eval instant at 50m rate(incr_histogram[5m]) - {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} +eval instant at 50m rate(incr_histogram[10m]) + {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} # Calculate the 50th percentile of observations over the last 10m. +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) - {} 1.5 - + {} 1.414213562373095 +clear # Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.: # 0: 1 2 4 8 16 32 64 (higher resolution) @@ -166,77 +174,79 @@ eval instant at 5m histogram_avg(low_res_histogram) eval instant at 5m histogram_fraction(1, 4, low_res_histogram) {} 1 - +clear # z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range # 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket. load 5m single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}} -eval instant at 5m histogram_count(single_zero_histogram) +eval instant at 1m histogram_count(single_zero_histogram) {} 1 -eval instant at 5m histogram_sum(single_zero_histogram) +eval instant at 1m histogram_sum(single_zero_histogram) {} 0.25 -eval instant at 5m histogram_avg(single_zero_histogram) +eval instant at 1m histogram_avg(single_zero_histogram) {} 0.25 # When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally # distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the # entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5. -eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram) +eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram) {} 1 # Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5. 
-eval instant at 5m histogram_quantile(0.5, single_zero_histogram) +eval instant at 1m histogram_quantile(0.5, single_zero_histogram) {} 0 - +clear # Let's turn single_histogram upside-down. load 5m negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}} -eval instant at 5m histogram_count(negative_histogram) +eval instant at 1m histogram_count(negative_histogram) {} 4 -eval instant at 5m histogram_sum(negative_histogram) +eval instant at 1m histogram_sum(negative_histogram) {} -5 -eval instant at 5m histogram_avg(negative_histogram) +eval instant at 1m histogram_avg(negative_histogram) {} -1.25 # We expect half of the values to fall in the range -2 < x <= -1. -eval instant at 5m histogram_fraction(-2, -1, negative_histogram) +eval instant at 1m histogram_fraction(-2, -1, negative_histogram) {} 0.5 -eval instant at 5m histogram_quantile(0.5, negative_histogram) - {} -1.5 - +# Exponential interpolation works the same as for positive buckets, just mirrored. +eval instant at 1m histogram_quantile(0.5, negative_histogram) + {} -1.414213562373095 +clear # Two histogram samples. load 5m two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}} # We expect to see the newest sample. -eval instant at 10m histogram_count(two_samples_histogram) +eval instant at 5m histogram_count(two_samples_histogram) {} 4 -eval instant at 10m histogram_sum(two_samples_histogram) +eval instant at 5m histogram_sum(two_samples_histogram) {} -4 -eval instant at 10m histogram_avg(two_samples_histogram) +eval instant at 5m histogram_avg(two_samples_histogram) {} -1 -eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram) +eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram) {} 0.5 -eval instant at 10m histogram_quantile(0.5, two_samples_histogram) - {} -1.5 - +# See explanation for exponential interpolation above. +eval instant at 5m histogram_quantile(0.5, two_samples_histogram) + {} -1.414213562373095 +clear # Add two histograms with negated data. load 5m @@ -259,6 +269,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram) eval instant at 5m histogram_quantile(0.5, balanced_histogram) {} 0.5 +clear + # Add histogram to test sum(last_over_time) regression load 5m incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10 @@ -270,6 +282,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram)) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) {} 30 +clear + # Apply rate function to histogram. load 15s histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100 @@ -280,6 +294,8 @@ eval instant at 5m rate(histogram_rate[45s]) eval range from 5m to 5m30s step 30s rate(histogram_rate[45s]) {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1 +clear + # Apply count and sum function to histogram. 
load 10m histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -290,6 +306,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2) eval instant at 10m histogram_sum(histogram_count_sum_2) {} 100 +clear + # Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res). load 10m histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1 @@ -300,6 +318,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1) {} 1.163807968526718 +clear + # Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res). load 10m histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1 @@ -310,6 +330,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2) {} 2.3971123370139447e-05 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}. load 10m histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -320,6 +342,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3) {} 1844.4651144196398 +clear + # Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}. load 10m histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1 @@ -330,6 +354,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4) {} 759352122.1939945 +clear + # Apply stddev and stdvar function to histogram with {-10x10}. load 10m histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1 @@ -340,6 +366,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5) {} 1.725830020304794 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}. load 10m histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -350,6 +378,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6) {} NaN +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}. load 10m histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -360,6 +390,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7) {} Inf +clear + # Apply quantile function to histogram with all positive buckets with zero bucket. 
load 10m histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -370,20 +402,24 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1) eval instant at 10m histogram_quantile(1, histogram_quantile_1) {} 16 +# The following quantiles are within a bucket. Exponential +# interpolation is applied (rather than linear, as it is done for +# classic histograms), leading to slightly different quantile values. eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) - {} 15.759999999999998 + {} 15.67072476139083 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) - {} 13.600000000000001 + {} 12.99603834169977 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) - {} 4.799999999999997 + {} 4.594793419988138 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) - {} 1.6666666666666665 + {} 1.5874010519681994 +# Linear interpolation within the zero bucket after all. eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) - {} 0.0006000000000000001 + {} 0.0006 eval instant at 10m histogram_quantile(0, histogram_quantile_1) {} 0 @@ -391,6 +427,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1) {} -Inf +clear + # Apply quantile function to histogram with all negative buckets with zero bucket. load 10m histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 @@ -401,17 +439,20 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2) eval instant at 10m histogram_quantile(1, histogram_quantile_2) {} 0 +# Again, the quantile values here are slightly different from what +# they would be with linear interpolation. Note that quantiles +# ending up in the zero bucket are linearly interpolated after all. eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) - {} -6.000000000000048e-05 + {} -0.00006 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) - {} -0.0005999999999999996 + {} -0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) - {} -1.6666666666666667 + {} -1.5874010519681996 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) - {} -13.6 + {} -12.996038341699768 eval instant at 10m histogram_quantile(0, histogram_quantile_2) {} -16 @@ -419,7 +460,11 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) {} -Inf -# Apply quantile function to histogram with both positive and negative buckets with zero bucket. +clear + +# Apply quantile function to histogram with both positive and negative +# buckets with zero bucket. +# First positive buckets with exponential interpolation. load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -430,31 +475,34 @@ eval instant at 10m histogram_quantile(1, histogram_quantile_3) {} 16 eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) - {} 15.519999999999996 + {} 15.34822590920423 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) - {} 11.200000000000003 + {} 10.556063286183155 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) - {} 1.2666666666666657 + {} 1.2030250360821164 +# Linear interpolation in the zero bucket, symmetrically centered around +# the zero point. 
eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) - {} 0.0006000000000000005 + {} 0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) {} 0 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) - {} -0.0005999999999999996 + {} -0.0006 +# Finally negative buckets with mirrored exponential interpolation. eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) - {} -1.266666666666667 + {} -1.2030250360821169 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) - {} -11.2 + {} -10.556063286183155 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) - {} -15.52 + {} -15.34822590920423 eval instant at 10m histogram_quantile(0, histogram_quantile_3) {} -16 @@ -462,6 +510,92 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) {} -Inf +clear + +# Try different schemas. (The interpolation logic must not depend on the schema.) +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 2.0 + {schema="0"} 1.4142135623730951 + {schema="+1"} 1.189207 + +eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# The same as above, but one bucket "further to the right". +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 8.0 + {schema="0"} 2.82842712474619 + {schema="+1"} 1.6817928305074292 + +eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# And everything again but for negative buckets. 
+clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -2.0 + {schema="0"} -1.4142135623730951 + {schema="+1"} -1.189207 + +eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -8.0 + {schema="0"} -2.82842712474619 + {schema="+1"} -1.6817928305074292 + +eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + + # Apply fraction function to empty histogram. load 10m histogram_fraction_1 {{}}x1 @@ -469,6 +603,8 @@ load 10m eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1) {} NaN +clear + # Apply fraction function to histogram with positive and zero buckets. load 10m histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -485,11 +621,18 @@ eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2) eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) {} 0.16666666666666666 +# Note that this result and the one above add up to 1. +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) + {} 0.8333333333333334 + +# We are in the zero bucket, resulting in linear interpolation eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) {} 0.08333333333333333 -eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) - {} 0.8333333333333334 +# Demonstrate that the inverse operation with histogram_quantile yields +# the original value with the non-trivial result above. +eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2) + {} 0.0005 eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) {} 0 @@ -497,17 +640,30 @@ eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) {} 0.25 +# More non-trivial results with interpolation involved below, including +# some round-trips via histogram_quantile to prove that the inverse +# operation leads to the same results. 
+ +eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2) + {} 0.4795739585136224 + eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2) + {} 0.6320802083934297 + +eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2) + {} 6 + eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) {} 0 @@ -570,6 +726,12 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3) eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3) + {} 0.9166666666666666 + +eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) {} 0 @@ -595,16 +757,22 @@ eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3) {} 0.25 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3) + {} 0.36791979160657035 + +eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3) + {} -6 + eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3) {} 0 @@ -633,6 +801,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3) eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3) {} 1 +clear + # Apply fraction function to histogram with both positive, negative and zero buckets. 
load 10m histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -652,6 +822,18 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4) eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4) + {} 0.5416666666666666 + +eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4) + {} 0.0005 + +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4) + {} 0.4583333333333333 + +eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) {} 0.4166666666666667 @@ -662,31 +844,31 @@ eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855414 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004825 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990366 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855456 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004817 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990362 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) {} 0 @@ -766,18 +948,40 @@ eval instant at 10m histogram_mul_div*float_series_0 eval instant at 10m float_series_0*histogram_mul_div {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} -# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. See: https://github.com/prometheus/prometheus/issues/13934 eval instant at 10m histogram_mul_div/0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div/float_series_0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div*0/0 - {} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}} + {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}} + +eval_info instant at 10m histogram_mul_div*histogram_mul_div + +eval_info instant at 10m histogram_mul_div/histogram_mul_div + +eval_info instant at 10m float_series_3/histogram_mul_div + +eval_info instant at 10m 0/histogram_mul_div clear +# Apply binary operators to mixed histogram and float samples. +# TODO:(NeerajGartia21) move these tests to their respective locations when tests from engine_test.go are be moved here. 
+ +load 10m + histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + float_sample 0x1 + +eval_info instant at 10m float_sample+histogram_sample + +eval_info instant at 10m histogram_sample+float_sample + +eval_info instant at 10m float_sample-histogram_sample + +eval_info instant at 10m histogram_sample-float_sample + # Counter reset only noticeable in a single bucket. load 5m reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}} @@ -814,7 +1018,7 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate @@ -836,11 +1040,11 @@ eval_warn instant at 1m30s rate(some_metric[1m]) # Should produce no results. # Start with custom, end with exponential. -eval_warn instant at 1m rate(some_metric[30s]) +eval_warn instant at 1m rate(some_metric[1m]) # Should produce no results. # Start with exponential, end with custom. -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) # Should produce no results. clear @@ -975,8 +1179,10 @@ clear load 1m histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}} -eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} -eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} + +clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test index df2311b9bae..4b00831dfc8 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test @@ -7,6 +7,7 @@ load 5m http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 http_requests{job="app-server", instance="1", group="canary"} 0+80x10 + http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}}x11 load 5m vector_matching_a{l="x"} 0+1x100 @@ -113,7 +114,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"} http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="1", 
job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60 +eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 @@ -287,6 +288,26 @@ eval instant at 50m 1 == bool 1 eval instant at 50m http_requests{job="api-server", instance="0", group="production"} == bool 100 {job="api-server", instance="0", group="production"} 1 +# The histogram is ignored here so the result doesn't change but it has an info annotation now. +eval_info instant at 5m {job="app-server"} == 80 + http_requests{group="canary", instance="1", job="app-server"} 80 + +eval_info instant at 5m http_requests_histogram != 80 + +eval_info instant at 5m http_requests_histogram > 80 + +eval_info instant at 5m http_requests_histogram < 80 + +eval_info instant at 5m http_requests_histogram >= 80 + +eval_info instant at 5m http_requests_histogram <= 80 + +# Should produce valid results in case of (in)equality between two histograms. +eval instant at 5m http_requests_histogram == http_requests_histogram + http_requests_histogram{job="app-server", instance="1", group="production"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +eval instant at 5m http_requests_histogram != http_requests_histogram + # group_left/group_right. clear @@ -308,65 +329,65 @@ load 5m threshold{instance="abc",job="node",target="a@b.com"} 0 # Copy machine role to node variable. -eval instant at 5m node_role * on (instance) group_right (role) node_var +eval instant at 1m node_role * on (instance) group_right (role) node_var {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_var * on (instance) group_left (role) node_role +eval instant at 1m node_var * on (instance) group_left (role) node_role {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_var * ignoring (role) group_left (role) node_role +eval instant at 1m node_var * ignoring (role) group_left (role) node_role {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_role * ignoring (role) group_right (role) node_var +eval instant at 1m node_role * ignoring (role) group_right (role) node_var {instance="abc",job="node",role="prometheus"} 2 # Copy machine role to node variable with instrumentation labels. -eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role +eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role {instance="abc",job="node",mode="idle",role="prometheus"} 3 {instance="abc",job="node",mode="user",role="prometheus"} 1 -eval instant at 5m node_cpu * on (instance) group_left (role) node_role +eval instant at 1m node_cpu * on (instance) group_left (role) node_role {instance="abc",job="node",mode="idle",role="prometheus"} 3 {instance="abc",job="node",mode="user",role="prometheus"} 1 # Ratio of total. 
-eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) +eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) +eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) {job="node",mode="idle"} 0.7857142857142857 {job="node",mode="user"} 0.21428571428571427 -eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) +eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) {} 1.0 -eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) +eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) +eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) +eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) {job="node",mode="idle"} 0.7857142857142857 {job="node",mode="user"} 0.21428571428571427 -eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) +eval instant at 1m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) {} 1.0 # Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here). -eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 +eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0 {instance="abc",job="node",mode="idle",foo="bar"} 3 {instance="abc",job="node",mode="user",foo="bar"} 1 {instance="def",job="node",mode="idle",foo="bar"} 8 @@ -374,12 +395,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 # Use threshold from metric, and copy over target. -eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold +eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 # Use threshold from metric, and a default (1) if it's not present. 
-eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) +eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 node_cpu{instance="def",job="node",mode="idle"} 8 @@ -387,37 +408,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or # Check that binops drop the metric name. -eval instant at 5m node_cpu + 2 +eval instant at 1m node_cpu + 2 {instance="abc",job="node",mode="idle"} 5 {instance="abc",job="node",mode="user"} 3 {instance="def",job="node",mode="idle"} 10 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu - 2 +eval instant at 1m node_cpu - 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} -1 {instance="def",job="node",mode="idle"} 6 {instance="def",job="node",mode="user"} 0 -eval instant at 5m node_cpu / 2 +eval instant at 1m node_cpu / 2 {instance="abc",job="node",mode="idle"} 1.5 {instance="abc",job="node",mode="user"} 0.5 {instance="def",job="node",mode="idle"} 4 {instance="def",job="node",mode="user"} 1 -eval instant at 5m node_cpu * 2 +eval instant at 1m node_cpu * 2 {instance="abc",job="node",mode="idle"} 6 {instance="abc",job="node",mode="user"} 2 {instance="def",job="node",mode="idle"} 16 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu ^ 2 +eval instant at 1m node_cpu ^ 2 {instance="abc",job="node",mode="idle"} 9 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 64 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu % 2 +eval instant at 1m node_cpu % 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 0 @@ -432,14 +453,14 @@ load 5m metricB{baz="meh"} 4 # On with no labels, for metrics with no common labels. -eval instant at 5m random + on() metricA +eval instant at 1m random + on() metricA {} 5 # Ignoring with no labels is the same as no ignoring. -eval instant at 5m metricA + ignoring() metricB +eval instant at 1m metricA + ignoring() metricB {baz="meh"} 7 -eval instant at 5m metricA + metricB +eval instant at 1m metricA + metricB {baz="meh"} 7 clear @@ -457,16 +478,16 @@ load 5m test_total{instance="localhost"} 50 test_smaller{instance="localhost"} 10 -eval instant at 5m test_total > bool test_smaller +eval instant at 1m test_total > bool test_smaller {instance="localhost"} 1 -eval instant at 5m test_total > test_smaller +eval instant at 1m test_total > test_smaller test_total{instance="localhost"} 50 -eval instant at 5m test_total < bool test_smaller +eval instant at 1m test_total < bool test_smaller {instance="localhost"} 0 -eval instant at 5m test_total < test_smaller +eval instant at 1m test_total < test_smaller clear @@ -476,14 +497,313 @@ load 5m trigx{} 20 trigNaN{} NaN -eval instant at 5m trigy atan2 trigx +eval instant at 1m trigy atan2 trigx {} 0.4636476090008061 -eval instant at 5m trigy atan2 trigNaN +eval instant at 1m trigy atan2 trigNaN {} NaN -eval instant at 5m 10 atan2 20 +eval instant at 1m 10 atan2 20 0.4636476090008061 -eval instant at 5m 10 atan2 NaN +eval instant at 1m 10 atan2 NaN NaN + +clear + +# Test comparison operations with floats and histograms. 
+load 6m + left_floats 1 2 _ _ 3 stale 4 5 NaN Inf -Inf + right_floats 4 _ _ 5 3 7 -1 20 NaN Inf -Inf + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} + right_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4 count:4 buckets:[1 2 1]}} {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ + right_floats_for_histograms 0 -1 2 3 4 + +eval range from 0 to 60m step 6m left_floats == right_floats + left_floats _ _ _ _ 3 _ _ _ _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats == bool right_floats + {} 0 _ _ _ 1 _ 0 0 0 1 1 + +eval range from 0 to 60m step 6m left_floats == does_not_match + # No results. + +eval range from 0 to 24m step 6m left_histograms == right_histograms + left_histograms {{schema:3 sum:4 count:4 buckets:[1 2 1]}} _ _ _ _ + +eval range from 0 to 24m step 6m left_histograms == bool right_histograms + {} 1 0 _ _ _ + +eval_info range from 0 to 24m step 6m left_histograms == right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats != right_floats + left_floats 1 _ _ _ _ _ 4 5 NaN _ _ + +eval range from 0 to 60m step 6m left_floats != bool right_floats + {} 1 _ _ _ 0 _ 1 1 1 0 0 + +eval range from 0 to 24m step 6m left_histograms != right_histograms + left_histograms _ {{schema:3 sum:4.5 count:5 buckets:[1 3 1]}} _ _ _ + +eval range from 0 to 24m step 6m left_histograms != bool right_histograms + {} 0 1 _ _ _ + +eval_info range from 0 to 24m step 6m left_histograms != right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats > right_floats + left_floats _ _ _ _ _ _ 4 _ _ _ _ + +eval range from 0 to 60m step 6m left_floats > bool right_floats + {} 0 _ _ _ 0 _ 1 0 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms > right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats >= right_floats + left_floats _ _ _ _ 3 _ 4 _ _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats >= bool right_floats + {} 0 _ _ _ 1 _ 1 0 0 1 1 + +eval_info range from 0 to 24m step 6m left_histograms >= right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats < right_floats + left_floats 1 _ _ _ _ _ _ 5 _ _ _ + +eval range from 0 to 60m step 6m left_floats < bool right_floats + {} 1 _ _ _ 0 _ 0 1 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms < right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < right_floats_for_histograms + # No results. 
+ +eval_info range from 0 to 24m step 6m left_histograms < bool right_floats_for_histograms + # No results. + +eval range from 0 to 60m step 6m left_floats <= right_floats + left_floats 1 _ _ _ 3 _ _ 5 _ Inf -Inf + +eval range from 0 to 60m step 6m left_floats <= bool right_floats + {} 1 _ _ _ 1 _ 0 1 0 1 1 + +eval_info range from 0 to 24m step 6m left_histograms <= right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool right_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= right_floats_for_histograms + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool right_floats_for_histograms + # No results. + +# Vector / scalar combinations with scalar on right side +eval range from 0 to 60m step 6m left_floats == 3 + left_floats _ _ _ _ 3 _ _ _ _ _ _ + +eval range from 0 to 60m step 6m left_floats != 3 + left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf + +eval range from 0 to 60m step 6m left_floats > 3 + left_floats _ _ _ _ _ _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m left_floats >= 3 + left_floats _ _ _ _ 3 _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m left_floats < 3 + left_floats 1 2 _ _ _ _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m left_floats <= 3 + left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m left_floats == bool 3 + {} 0 0 _ _ 1 _ 0 0 0 0 0 + +eval range from 0 to 60m step 6m left_floats == Inf + left_floats _ _ _ _ _ _ _ _ _ Inf _ + +eval range from 0 to 60m step 6m left_floats == bool Inf + {} 0 0 _ _ 0 _ 0 0 0 1 0 + +eval range from 0 to 60m step 6m left_floats == NaN + # No results. + +eval range from 0 to 60m step 6m left_floats == bool NaN + {} 0 0 _ _ 0 _ 0 0 0 0 0 + +eval_info range from 0 to 24m step 6m left_histograms == 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != 3 + # No results. + +eval range from 0 to 24m step 6m left_histograms != 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > 0 + # No results. + +eval range from 0 to 24m step 6m left_histograms >= 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= 3 + # No results. + +eval range from 0 to 24m step 6m left_histograms <= 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms == bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms != bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms > bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms >= bool 0 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms < bool 0 + # No results. 
+ +eval_info range from 0 to 24m step 6m left_histograms <= bool 3 + # No results. + +eval_info range from 0 to 24m step 6m left_histograms <= bool 0 + # No results. + +# Vector / scalar combinations with scalar on left side +eval range from 0 to 60m step 6m 3 == left_floats + left_floats _ _ _ _ 3 _ _ _ _ _ _ + +eval range from 0 to 60m step 6m 3 != left_floats + left_floats 1 2 _ _ _ _ 4 5 NaN Inf -Inf + +eval range from 0 to 60m step 6m 3 < left_floats + left_floats _ _ _ _ _ _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m 3 <= left_floats + left_floats _ _ _ _ 3 _ 4 5 _ Inf _ + +eval range from 0 to 60m step 6m 3 > left_floats + left_floats 1 2 _ _ _ _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m 3 >= left_floats + left_floats 1 2 _ _ 3 _ _ _ _ _ -Inf + +eval range from 0 to 60m step 6m 3 == bool left_floats + {} 0 0 _ _ 1 _ 0 0 0 0 0 + +eval range from 0 to 60m step 6m Inf == left_floats + left_floats _ _ _ _ _ _ _ _ _ Inf _ + +eval range from 0 to 60m step 6m Inf == bool left_floats + {} 0 0 _ _ 0 _ 0 0 0 1 0 + +eval range from 0 to 60m step 6m NaN == left_floats + # No results. + +eval range from 0 to 60m step 6m NaN == bool left_floats + {} 0 0 _ _ 0 _ 0 0 0 0 0 + +eval range from 0 to 24m step 6m 3 == left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 == left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 != left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 != left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 < left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 > left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 > left_histograms + # No results. + +eval range from 0 to 24m step 6m 3 >= left_histograms + # No results. + +eval range from 0 to 24m step 6m 0 >= left_histograms + # No results. 
+ +clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test index e6951096026..3bfe2ce4cb3 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test @@ -1,18 +1,18 @@ # sum_over_time with all values -load 30s +load 15s bar 0 1 10 100 1000 -eval range from 0 to 2m step 1m sum_over_time(bar[30s]) +eval range from 0 to 1m step 30s sum_over_time(bar[30s]) {} 0 11 1100 clear # sum_over_time with trailing values -load 30s +load 15s bar 0 1 10 100 1000 0 0 0 0 eval range from 0 to 2m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 + {} 0 1100 0 clear @@ -21,15 +21,15 @@ load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000 eval range from 0 to 4m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 110000 11000000 + {} 0 10 1000 100000 10000000 clear # sum_over_time with all values random -load 30s +load 15s bar 5 17 42 2 7 905 51 -eval range from 0 to 3m step 1m sum_over_time(bar[30s]) +eval range from 0 to 90s step 30s sum_over_time(bar[30s]) {} 5 59 9 956 clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test index 4fdbc997b7f..a48473d4398 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test @@ -14,10 +14,10 @@ eval instant at 40s metric {__name__="metric"} 2 # It goes stale 5 minutes after the last sample. -eval instant at 330s metric +eval instant at 329s metric {__name__="metric"} 2 -eval instant at 331s metric +eval instant at 330s metric # Range vector ignores stale sample. @@ -30,9 +30,13 @@ eval instant at 10s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[10s]) + +eval instant at 20s count_over_time(metric[20s]) {} 1 eval instant at 20s count_over_time(metric[10]) + +eval instant at 20s count_over_time(metric[20]) {} 1 @@ -48,7 +52,7 @@ eval instant at 0s metric eval instant at 150s metric {__name__="metric"} 0 -eval instant at 300s metric +eval instant at 299s metric {__name__="metric"} 0 -eval instant at 301s metric +eval instant at 300s metric diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test index 1d338d97642..3ac547a2b57 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test @@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s]) # Every evaluation yields the last value, i.e. 2 eval instant at 5m sum_over_time(metric[50s:10s]) - {} 12 + {} 10 # Series becomes stale at 5m10s (5m after last sample) -# Hence subquery gets a single sample at 6m-50s=5m10s. -eval instant at 6m sum_over_time(metric[50s:10s]) +# Hence subquery gets a single sample at 5m10s. 
+eval instant at 5m59s sum_over_time(metric[60s:10s]) {} 2 eval instant at 10s rate(metric[20s:10s]) {} 0.1 eval instant at 20s rate(metric[20s:5s]) - {} 0.05 + {} 0.06666666666666667 clear @@ -49,16 +49,16 @@ load 10s metric3 0+3x1000 eval instant at 1000s sum_over_time(metric1[30s:10s]) - {} 394 + {} 297 -# This is (394*2 - 100), because other than the last 100 at 1000s, +# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. eval instant at 1000s sum_over_time(metric1[30s:5s]) - {} 688 + {} 591 -# Offset is aligned with the step. +# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) - {} 394 + {} 297 # Same result for different offsets due to step alignment. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) @@ -93,16 +93,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) # Nested subqueries eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) - {} 0.4 + {} 0.30000000000000004 eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) - {} 0.8 + {} 0.6000000000000001 eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) - {} 1.2 + {} 0.9 eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) - {} 2.4 + {} 1.8 clear @@ -115,16 +115,20 @@ load 7s eval instant at 80s rate(metric[1m]) {} 2.517857143 -# No extrapolation, [2@20, 144@80]: (144 - 2) / 60 -eval instant at 80s rate(metric[1m:10s]) - {} 2.366666667 +# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) +eval instant at 80s rate(metric[1m500ms:10s]) + {} 2.3666666666666667 + +# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 +eval instant at 80s rate(metric[1m1s:10s]) + {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 eval instant at 20s min_over_time(metric[10s]) {} 2 -# min(1@10, 2@20) -eval instant at 20s min_over_time(metric[10s:10s]) +# min(2@20) +eval instant at 20s min_over_time(metric[15s:10s]) {} 1 eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test index fa5f94651b6..036621193d7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test @@ -5,92 +5,92 @@ load 5m trig{l="y"} 20 trig{l="NaN"} NaN -eval instant at 5m sin(trig) +eval instant at 1m sin(trig) {l="x"} -0.5440211108893699 {l="y"} 0.9129452507276277 {l="NaN"} NaN -eval instant at 5m cos(trig) +eval instant at 1m cos(trig) {l="x"} -0.8390715290764524 {l="y"} 0.40808206181339196 {l="NaN"} NaN -eval instant at 5m tan(trig) +eval instant at 1m tan(trig) {l="x"} 0.6483608274590867 {l="y"} 2.2371609442247427 {l="NaN"} NaN -eval instant at 5m asin(trig - 10.1) +eval instant at 1m asin(trig - 10.1) {l="x"} -0.10016742116155944 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m acos(trig - 10.1) +eval instant at 1m acos(trig - 10.1) {l="x"} 1.670963747956456 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m atan(trig) +eval instant at 1m atan(trig) {l="x"} 1.4711276743037345 {l="y"} 1.5208379310729538 {l="NaN"} NaN -eval instant at 5m sinh(trig) +eval instant at 1m sinh(trig) {l="x"} 11013.232920103324 {l="y"} 
2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m cosh(trig) +eval instant at 1m cosh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m tanh(trig) +eval instant at 1m tanh(trig) {l="x"} 0.9999999958776927 {l="y"} 1 {l="NaN"} NaN -eval instant at 5m asinh(trig) +eval instant at 1m asinh(trig) {l="x"} 2.99822295029797 {l="y"} 3.6895038689889055 {l="NaN"} NaN -eval instant at 5m acosh(trig) +eval instant at 1m acosh(trig) {l="x"} 2.993222846126381 {l="y"} 3.6882538673612966 {l="NaN"} NaN -eval instant at 5m atanh(trig - 10.1) +eval instant at 1m atanh(trig - 10.1) {l="x"} -0.10033534773107522 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m rad(trig) +eval instant at 1m rad(trig) {l="x"} 0.17453292519943295 {l="y"} 0.3490658503988659 {l="NaN"} NaN -eval instant at 5m rad(trig - 10) +eval instant at 1m rad(trig - 10) {l="x"} 0 {l="y"} 0.17453292519943295 {l="NaN"} NaN -eval instant at 5m rad(trig - 20) +eval instant at 1m rad(trig - 20) {l="x"} -0.17453292519943295 {l="y"} 0 {l="NaN"} NaN -eval instant at 5m deg(trig) +eval instant at 1m deg(trig) {l="x"} 572.9577951308232 {l="y"} 1145.9155902616465 {l="NaN"} NaN -eval instant at 5m deg(trig - 10) +eval instant at 1m deg(trig - 10) {l="x"} 0 {l="y"} 572.9577951308232 {l="NaN"} NaN -eval instant at 5m deg(trig - 20) +eval instant at 1m deg(trig - 20) {l="x"} -572.9577951308232 {l="y"} 0 {l="NaN"} NaN diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go index 7ddb76acba7..06775d3ae67 100644 --- a/vendor/github.com/prometheus/prometheus/promql/quantile.go +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -153,19 +153,31 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { // histogramQuantile calculates the quantile 'q' based on the given histogram. // -// The quantile value is interpolated assuming a linear distribution within a -// bucket. -// TODO(beorn7): Find an interpolation method that is a better fit for -// exponential buckets (and think about configurable interpolation). +// For custom buckets, the result is interpolated linearly, i.e. it is assumed +// the observations are uniformly distributed within each bucket. (This is a +// quite blunt assumption, but it is consistent with the interpolation method +// used for classic histograms so far.) +// +// For exponential buckets, the interpolation is done under the assumption that +// the samples within each bucket are distributed in a way that they would +// uniformly populate the buckets in a hypothetical histogram with higher +// resolution. For example, if the rank calculation suggests that the requested +// quantile is right in the middle of the population of the (1,2] bucket, we +// assume the quantile would be right at the bucket boundary between the two +// buckets the (1,2] bucket would be divided into if the histogram had double +// the resolution, which is 2**2**-1 = 1.4142... We call this exponential +// interpolation. +// +// However, for a quantile that ends up in the zero bucket, this method isn't +// very helpful (because there is an infinite number of buckets close to zero, +// so we would have to assume zero as the result). Therefore, we return to +// linear interpolation in the zero bucket. // // A natural lower bound of 0 is assumed if the histogram has only positive // buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has // only negative buckets. 
-// TODO(beorn7): Come to terms if we want that. // -// There are a number of special cases (once we have a way to report errors -// happening during evaluations of AST functions, we should report those -// explicitly): +// There are a number of special cases: // // If the histogram has 0 observations, NaN is returned. // @@ -193,9 +205,9 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank float64 ) - // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator - // if the q < 0.5, use the forward iterator - // if the q >= 0.5, use the reverse iterator + // If there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator. + // If q < 0.5, use the forward iterator. + // If q >= 0.5, use the reverse iterator. if math.IsNaN(h.Sum) || q < 0.5 { it = h.AllBucketIterator() rank = q * h.Count @@ -260,8 +272,29 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank = count - rank } - // TODO(codesome): Use a better estimation than linear. - return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) + // The fraction of how far we are into the current bucket. + fraction := rank / bucket.Count + + // Return linear interpolation for custom buckets and for quantiles that + // end up in the zero bucket. + if h.UsesCustomBuckets() || (bucket.Lower <= 0 && bucket.Upper >= 0) { + return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction + } + + // For exponential buckets, we interpolate on a logarithmic scale. On a + // logarithmic scale, the exponential bucket boundaries (for any schema) + // become linear (every bucket has the same width). Therefore, after + // taking the logarithm of both bucket boundaries, we can use the + // calculated fraction in the same way as for linear interpolation (see + // above). Finally, we return to the normal scale by applying the + // exponential function to the result. + logLower := math.Log2(math.Abs(bucket.Lower)) + logUpper := math.Log2(math.Abs(bucket.Upper)) + if bucket.Lower > 0 { // Positive bucket. + return math.Exp2(logLower + (logUpper-logLower)*fraction) + } + // Otherwise, we are in a negative bucket and have to mirror things. + return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction)) } // histogramFraction calculates the fraction of observations between the @@ -271,8 +304,8 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { // histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h) // returns 0.9. // -// The same notes (and TODOs) with regard to interpolation and assumptions about -// the zero bucket boundaries apply as for histogramQuantile. +// The same notes with regard to interpolation and assumptions about the zero +// bucket boundaries apply as for histogramQuantile. // // Whether either boundary is inclusive or exclusive doesn’t actually matter as // long as interpolation has to be performed anyway. In the case of a boundary @@ -310,7 +343,35 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 ) for it.Next() { b := it.At() - if b.Lower < 0 && b.Upper > 0 { + zeroBucket := false + + // interpolateLinearly is used for custom buckets to be + // consistent with the linear interpolation known from classic + // histograms. It is also used for the zero bucket. 
+ interpolateLinearly := func(v float64) float64 { + return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower) + } + + // interpolateExponentially is using the same exponential + // interpolation method as above for histogramQuantile. This + // method is a better fit for exponential bucketing. + interpolateExponentially := func(v float64) float64 { + var ( + logLower = math.Log2(math.Abs(b.Lower)) + logUpper = math.Log2(math.Abs(b.Upper)) + logV = math.Log2(math.Abs(v)) + fraction float64 + ) + if v > 0 { + fraction = (logV - logLower) / (logUpper - logLower) + } else { + fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) + } + return rank + b.Count*fraction + } + + if b.Lower <= 0 && b.Upper >= 0 { + zeroBucket = true switch { case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // This is the zero bucket and the histogram has only @@ -325,10 +386,12 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 } } if !lowerSet && b.Lower >= lower { + // We have hit the lower value at the lower bucket boundary. lowerRank = rank lowerSet = true } if !upperSet && b.Lower >= upper { + // We have hit the upper value at the lower bucket boundary. upperRank = rank upperSet = true } @@ -336,11 +399,21 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 break } if !lowerSet && b.Lower < lower && b.Upper > lower { - lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower) + // The lower value is in this bucket. + if h.UsesCustomBuckets() || zeroBucket { + lowerRank = interpolateLinearly(lower) + } else { + lowerRank = interpolateExponentially(lower) + } lowerSet = true } if !upperSet && b.Lower < upper && b.Upper > upper { - upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower) + // The upper value is in this bucket. 
+ if h.UsesCustomBuckets() || zeroBucket { + upperRank = interpolateLinearly(upper) + } else { + upperRank = interpolateExponentially(upper) + } upperSet = true } if lowerSet && upperSet { diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go index 7e06ebb97fe..c0a70b66d77 100644 --- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,14 +27,12 @@ import ( "unicode/utf8" "github.com/edsrzf/mmap-go" - "github.com/go-kit/log" - "github.com/go-kit/log/level" ) type ActiveQueryTracker struct { - mmapedFile []byte + mmappedFile []byte getNextIndex chan int - logger log.Logger + logger *slog.Logger closer io.Closer maxConcurrent int } @@ -63,11 +62,11 @@ func parseBrokenJSON(brokenJSON []byte) (string, bool) { return queries, true } -func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { +func logUnfinishedQueries(filename string, filesize int, logger *slog.Logger) { if _, err := os.Stat(filename); err == nil { fd, err := os.Open(filename) if err != nil { - level.Error(logger).Log("msg", "Failed to open query log file", "err", err) + logger.Error("Failed to open query log file", "err", err) return } defer fd.Close() @@ -75,7 +74,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { brokenJSON := make([]byte, filesize) _, err = fd.Read(brokenJSON) if err != nil { - level.Error(logger).Log("msg", "Failed to read query log file", "err", err) + logger.Error("Failed to read query log file", "err", err) return } @@ -83,72 +82,72 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { if !queriesExist { return } - level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries) + logger.Info("These queries didn't finish in prometheus' last run:", "queries", queries) } } -type mmapedFile struct { +type mmappedFile struct { f io.Closer m mmap.MMap } -func (f *mmapedFile) Close() error { +func (f *mmappedFile) Close() error { err := f.m.Unmap() if err != nil { - err = fmt.Errorf("mmapedFile: unmapping: %w", err) + err = fmt.Errorf("mmappedFile: unmapping: %w", err) } if fErr := f.f.Close(); fErr != nil { - return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err) + return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err) } return err } -func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { +func getMMappedFile(filename string, filesize int, logger *slog.Logger) ([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { absPath, pathErr := filepath.Abs(filename) if pathErr != nil { absPath = filename } - level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) + logger.Error("Error opening query log file", "file", absPath, "err", err) return nil, nil, err } err = file.Truncate(int64(filesize)) if err != nil { file.Close() - level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) + logger.Error("Error setting filesize.", "filesize", filesize, "err", err) return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { file.Close() - level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted 
size", filesize, "err", err) + logger.Error("Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) return nil, nil, err } - return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err + return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err } -func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { +func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger *slog.Logger) *ActiveQueryTracker { err := os.MkdirAll(localStoragePath, 0o777) if err != nil { - level.Error(logger).Log("msg", "Failed to create directory for logging active queries") + logger.Error("Failed to create directory for logging active queries") } filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize logUnfinishedQueries(filename, filesize, logger) - fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger) + fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } copy(fileAsBytes, "[") activeQueryTracker := ActiveQueryTracker{ - mmapedFile: fileAsBytes, + mmappedFile: fileAsBytes, closer: closer, getNextIndex: make(chan int, maxConcurrent), logger: logger, @@ -174,18 +173,18 @@ func trimStringByBytes(str string, size int) string { return string(bytesStr[:trimIndex]) } -func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { +func _newJSONEntry(query string, timestamp int64, logger *slog.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) if err != nil { - level.Error(logger).Log("msg", "Cannot create json of query", "query", query) + logger.Error("Cannot create json of query", "query", query) return []byte{} } return jsonEntry } -func newJSONEntry(query string, logger log.Logger) []byte { +func newJSONEntry(query string, logger *slog.Logger) []byte { timestamp := time.Now().Unix() minEntryJSON := _newJSONEntry("", timestamp, logger) @@ -206,14 +205,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int { } func (tracker ActiveQueryTracker) Delete(insertIndex int) { - copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize)) + copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize)) tracker.getNextIndex <- insertIndex } func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) { select { case i := <-tracker.getNextIndex: - fileBytes := tracker.mmapedFile + fileBytes := tracker.mmappedFile entry := newJSONEntry(query, tracker.logger) start, end := i, i+entrySize diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index f25dbcd7809..f19c0b5b582 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { ssi.currH = p.H return chunkenc.ValFloatHistogram default: - panic("storageSeriesIterater.Next failed to pick value type") + panic("storageSeriesIterator.Next failed to pick value type") } } diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index 038c49a6976..b94f3c5ff56 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -16,13 +16,12 @@ package rules import ( 
"context" "fmt" + "log/slog" "net/url" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -141,7 +140,7 @@ type AlertingRule struct { // the fingerprint of the labelset they correspond to. active map[uint64]*Alert - logger log.Logger + logger *slog.Logger noDependentRules *atomic.Bool noDependencyRules *atomic.Bool @@ -151,7 +150,7 @@ type AlertingRule struct { func NewAlertingRule( name string, vec parser.Expr, hold, keepFiringFor time.Duration, labels, annotations, externalLabels labels.Labels, externalURL string, - restored bool, logger log.Logger, + restored bool, logger *slog.Logger, ) *AlertingRule { el := externalLabels.Map() @@ -381,7 +380,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t result, err := tmpl.Expand() if err != nil { result = fmt.Sprintf("", err) - level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData) + r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData) } return result } diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index 539149da4c8..b6feb6f9625 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -16,9 +16,9 @@ package rules import ( "context" "errors" + "log/slog" "math" "slices" - "sort" "strings" "sync" "time" @@ -27,10 +27,9 @@ import ( "github.com/prometheus/prometheus/promql/parser" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -67,7 +66,7 @@ type Group struct { terminated chan struct{} managerDone chan struct{} - logger log.Logger + logger *slog.Logger metrics *Metrics @@ -76,8 +75,8 @@ type Group struct { evalIterationFunc GroupEvalIterationFunc // concurrencyController controls the rules evaluation concurrency. - concurrencyController RuleConcurrencyController - + concurrencyController RuleConcurrencyController + appOpts *storage.AppendOptions alignEvaluationTimeOnInterval bool } @@ -104,9 +103,13 @@ type GroupOptions struct { // NewGroup makes a new Group with the given name, options, and rules. 
func NewGroup(o GroupOptions) *Group { - metrics := o.Opts.Metrics + opts := o.Opts + if opts == nil { + opts = &ManagerOptions{} + } + metrics := opts.Metrics if metrics == nil { - metrics = NewGroupMetrics(o.Opts.Registerer) + metrics = NewGroupMetrics(opts.Registerer) } key := GroupKey(o.File, o.Name) @@ -125,30 +128,34 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc = DefaultEvalIterationFunc } - concurrencyController := o.Opts.RuleConcurrencyController + concurrencyController := opts.RuleConcurrencyController if concurrencyController == nil { concurrencyController = sequentialRuleEvalController{} } - return &Group{ - name: o.Name, - file: o.File, - interval: o.Interval, - queryOffset: o.QueryOffset, - limit: o.Limit, - rules: o.Rules, - shouldRestore: o.ShouldRestore, - opts: o.Opts, - sourceTenants: o.SourceTenants, - seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), - done: make(chan struct{}), - managerDone: o.done, - terminated: make(chan struct{}), - logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), - metrics: metrics, - evalIterationFunc: evalIterationFunc, - concurrencyController: concurrencyController, + if opts.Logger == nil { + opts.Logger = promslog.NewNopLogger() + } + return &Group{ + name: o.Name, + file: o.File, + interval: o.Interval, + queryOffset: o.QueryOffset, + limit: o.Limit, + rules: o.Rules, + shouldRestore: o.ShouldRestore, + opts: opts, + sourceTenants: o.SourceTenants, + seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), + done: make(chan struct{}), + managerDone: o.done, + terminated: make(chan struct{}), + logger: opts.Logger.With("file", o.File, "group", o.Name), + metrics: metrics, + evalIterationFunc: evalIterationFunc, + concurrencyController: concurrencyController, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval, } } @@ -197,7 +204,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo return ok } -// Queryable returns the group's querable. +// Queryable returns the group's queryable. func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable } // Context returns the group's context. @@ -213,7 +220,7 @@ func (g *Group) Limit() int { return g.limit } // If it's empty or nil, then the owning user/tenant is considered to be the source tenant. 
func (g *Group) SourceTenants() []string { return g.sourceTenants } -func (g *Group) Logger() log.Logger { return g.logger } +func (g *Group) Logger() *slog.Logger { return g.logger } func (g *Group) run(ctx context.Context) { defer close(g.terminated) @@ -285,7 +292,7 @@ func (g *Group) run(ctx context.Context) { g.RestoreForState(restoreStartTime) totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds() g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds) - level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) + g.logger.Debug("'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) g.shouldRestore = false } @@ -514,7 +521,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { defer cleanup() } - logger := log.WithPrefix(g.logger, "name", rule.Name(), "index", i) + logger := g.logger.With("name", rule.Name(), "index", i) ctx, sp := otel.Tracer("").Start(ctx, "rule") sp.SetAttributes(attribute.String("name", rule.Name())) defer func(t time.Time) { @@ -527,7 +534,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { }(time.Now()) if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = log.WithPrefix(logger, "trace_id", sp.SpanContext().TraceID()) + logger = logger.With("trace_id", sp.SpanContext().TraceID()) } g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() @@ -543,7 +550,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // happens on shutdown and thus we skip logging of any errors here. var eqc promql.ErrQueryCanceled if !errors.As(err, &eqc) { - level.Warn(logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) + logger.Warn("Evaluating rule failed", "rule", rule, "err", err) } return } @@ -569,7 +576,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { sp.SetStatus(codes.Error, err.Error()) g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - level.Warn(logger).Log("msg", "Rule sample appending failed", "err", err) + logger.Warn("Rule sample appending failed", "err", err) return } g.seriesInPreviousEval[i] = seriesReturned @@ -579,6 +586,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if s.H != nil { _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) } else { + app.SetOptions(g.appOpts) _, err = app.Append(0, s.Metric, s.T, s.F) } @@ -593,15 +601,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrTooOldSample): numTooOld++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): numDuplicates++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) default: - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } } else { buf := [1024]byte{} @@ -609,13 +617,13 @@ func (g *Group) Eval(ctx 
context.Context, ts time.Time) { } } if numOutOfOrder > 0 { - level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) + logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) } if numTooOld > 0 { - level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) + logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) } if numDuplicates > 0 { - level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) + logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) } for metric, lset := range g.seriesInPreviousEval[i] { @@ -634,7 +642,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(logger).Log("msg", "Adding stale sample failed", "sample", lset.String(), "err", err) + logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) } } } @@ -675,6 +683,7 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { return } app := g.opts.Appendable.Appender(ctx) + app.SetOptions(g.appOpts) queryOffset := g.QueryOffset() for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. @@ -691,11 +700,11 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err) + g.logger.Warn("Adding stale sample for previous configuration failed", "sample", s, "err", err) } } if err := app.Commit(); err != nil { - level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err) + g.logger.Warn("Stale sample appending for previous configuration failed", "err", err) } else { g.staleSeries = nil } @@ -710,12 +719,12 @@ func (g *Group) RestoreForState(ts time.Time) { mintMS := int64(model.TimeFromUnixNano(mint.UnixNano())) q, err := g.opts.Queryable.Querier(mintMS, maxtMS) if err != nil { - level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err) + g.logger.Error("Failed to get Querier", "err", err) return } defer func() { if err := q.Close(); err != nil { - level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err) + g.logger.Error("Failed to close Querier", "err", err) } }() @@ -736,8 +745,8 @@ func (g *Group) RestoreForState(ts time.Time) { sset, err := alertRule.QueryForStateSeries(g.opts.Context, q) if err != nil { - level.Error(g.logger).Log( - "msg", "Failed to restore 'for' state", + g.logger.Error( + "Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Select", "err", err, @@ -756,7 +765,7 @@ func (g *Group) RestoreForState(ts time.Time) { // No results for this alert rule. 
if len(seriesByLabels) == 0 { - level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) + g.logger.Debug("No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) alertRule.SetRestored(true) continue } @@ -776,7 +785,7 @@ func (g *Group) RestoreForState(ts time.Time) { t, v = it.At() } if it.Err() != nil { - level.Error(g.logger).Log("msg", "Failed to restore 'for' state", + g.logger.Error("Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err()) return } @@ -818,7 +827,7 @@ func (g *Group) RestoreForState(ts time.Time) { } a.ActiveAt = restoredActiveAt - level.Debug(g.logger).Log("msg", "'for' state restored", + g.logger.Debug("'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) }) @@ -871,7 +880,7 @@ func (g *Group) Equals(ng *Group) bool { copyAndSort := func(x []string) []string { copied := make([]string, len(x)) copy(copied, x) - sort.Strings(copied) + slices.Sort(copied) return copied } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index a2421a4d2d5..b5bb0151166 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -17,15 +17,15 @@ import ( "context" "errors" "fmt" + "log/slog" "net/url" "slices" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "golang.org/x/sync/semaphore" "github.com/prometheus/prometheus/model/labels" @@ -96,7 +96,7 @@ type Manager struct { done chan struct{} restored bool - logger log.Logger + logger *slog.Logger } // NotifyFunc sends notifications about a set of alerts generated by the given expression. @@ -112,7 +112,7 @@ type ManagerOptions struct { Context context.Context Appendable storage.Appendable Queryable storage.Queryable - Logger log.Logger + Logger *slog.Logger Registerer prometheus.Registerer OutageTolerance time.Duration ForGracePeriod time.Duration @@ -158,6 +158,10 @@ func NewManager(o *ManagerOptions) *Manager { o.RuleDependencyController = ruleDependencyController{} } + if o.Logger == nil { + o.Logger = promslog.NewNopLogger() + } + m := &Manager{ groups: map[string]*Group{}, opts: o, @@ -171,7 +175,7 @@ func NewManager(o *ManagerOptions) *Manager { // Run starts processing of the rule manager. It is blocking. func (m *Manager) Run() { - level.Info(m.logger).Log("msg", "Starting rule manager...") + m.logger.Info("Starting rule manager...") m.start() <-m.done } @@ -185,7 +189,7 @@ func (m *Manager) Stop() { m.mtx.Lock() defer m.mtx.Unlock() - level.Info(m.logger).Log("msg", "Stopping rule manager...") + m.logger.Info("Stopping rule manager...") for _, eg := range m.groups { eg.stop() @@ -195,7 +199,7 @@ func (m *Manager) Stop() { // staleness markers. close(m.done) - level.Info(m.logger).Log("msg", "Rule manager stopped") + m.logger.Info("Rule manager stopped") } // Update the rule manager's state as the config requires. 
If @@ -216,7 +220,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels if errs != nil { for _, e := range errs { - level.Error(m.logger).Log("msg", "loading groups failed", "err", e) + m.logger.Error("loading groups failed", "err", e) } return errors.New("error loading rules, previous rule set restored") } @@ -327,25 +331,27 @@ func (m *Manager) LoadGroups( return nil, []error{fmt.Errorf("%s: %w", fn, err)} } + mLabels := FromMaps(rg.Labels, r.Labels) + if r.Alert.Value != "" { rules = append(rules, NewAlertingRule( r.Alert.Value, expr, time.Duration(r.For), time.Duration(r.KeepFiringFor), - labels.FromMap(r.Labels), + mLabels, labels.FromMap(r.Annotations), externalLabels, externalURL, !shouldRestore, - log.With(m.logger, "alert", r.Alert), + m.logger.With("alert", r.Alert), )) continue } rules = append(rules, NewRecordingRule( r.Record.Value, expr, - labels.FromMap(r.Labels), + mLabels, )) } @@ -530,3 +536,16 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) } func (c sequentialRuleEvalController) Done(_ context.Context) {} + +// FromMaps returns new sorted Labels from the given maps, overriding each other in order. +func FromMaps(maps ...map[string]string) labels.Labels { + mLables := make(map[string]string) + + for _, m := range maps { + for k, v := range m { + mLables[k] = v + } + } + + return labels.FromMap(mLables) +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index d7786a082bd..f3dad2a0488 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -17,32 +17,32 @@ import ( "errors" "fmt" "hash/fnv" - "io" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/osutil" "github.com/prometheus/prometheus/util/pool" ) // NewManager is the Manager constructor. -func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(string) (log.Logger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { +func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } sm, err := newScrapeMetrics(registerer) @@ -70,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool - NoDefaultPort bool + ExtraMetrics bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. Should be false for Prometheus. PassMetadataInContext bool @@ -101,7 +100,7 @@ const DefaultNameEscapingScheme = model.ValueEncodingEscaping // when receiving new target groups from the discovery manager. 
type Manager struct { opts *Options - logger log.Logger + logger *slog.Logger append storage.Appendable graceShut chan struct{} @@ -109,8 +108,8 @@ type Manager struct { mtxScrape sync.Mutex // Guards the fields below. scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool - newScrapeFailureLogger func(string) (log.Logger, error) - scrapeFailureLoggers map[string]log.Logger + newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error) + scrapeFailureLoggers map[string]*logging.JSONFileLogger targetSets map[string][]*targetgroup.Group buffers *pool.Pool @@ -176,21 +175,26 @@ func (m *Manager) reload() { if _, ok := m.scrapePools[setName]; !ok { scrapeConfig, ok := m.scrapeConfigs[setName] if !ok { - level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) + m.logger.Error("error reloading target set", "err", "invalid config id:"+setName) + continue + } + if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion { + // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137 + m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137") continue } m.metrics.targetScrapePools.Inc() - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.buffers, m.opts, m.metrics) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { m.metrics.targetScrapePoolsFailed.Inc() - level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) + m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName) continue } m.scrapePools[setName] = sp if l, ok := m.scrapeFailureLoggers[scrapeConfig.ScrapeFailureLogFile]; ok { sp.SetScrapeFailureLogger(l) } else { - level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) + sp.logger.Error("No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) } } @@ -247,7 +251,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) - scrapeFailureLoggers := map[string]log.Logger{ + scrapeFailureLoggers := map[string]*logging.JSONFileLogger{ "": nil, // Emptying the file name sets the scrape logger to nil. } for _, scfg := range scfgs { @@ -255,23 +259,23 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { // We promise to reopen the file on each reload. 
var ( - l log.Logger - err error + logger *logging.JSONFileLogger + err error ) if m.newScrapeFailureLogger != nil { - if l, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { + if logger, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { return err } } - scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = l + scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = logger } } m.scrapeConfigs = c oldScrapeFailureLoggers := m.scrapeFailureLoggers for _, s := range oldScrapeFailureLoggers { - if closer, ok := s.(io.Closer); ok { - defer closer.Close() + if s != nil { + defer s.Close() } } @@ -291,7 +295,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { - level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) + m.logger.Error("error reloading scrape pool", "err", err, "scrape_pool", name) failed = true } fallthrough @@ -299,7 +303,7 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { if l, ok := m.scrapeFailureLoggers[cfg.ScrapeFailureLogFile]; ok { sp.SetScrapeFailureLogger(l) } else { - level.Error(sp.logger).Log("msg", "No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) + sp.logger.Error("No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) } } } diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index ea98432be6d..7e270bb3a3c 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "net/http" "reflect" @@ -29,11 +30,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/klauspost/compress/gzip" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/config" @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/pool" ) @@ -63,7 +64,7 @@ var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels. // scrapePool manages scrapes for sets of targets. type scrapePool struct { appendable storage.Appendable - logger log.Logger + logger *slog.Logger cancel context.CancelFunc httpOpts []config_util.HTTPClientOption @@ -87,11 +88,9 @@ type scrapePool struct { // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop - noDefaultPort bool - metrics *scrapeMetrics - scrapeFailureLogger log.Logger + scrapeFailureLogger *logging.JSONFileLogger scrapeFailureLoggerMtx sync.RWMutex } @@ -113,8 +112,10 @@ type scrapeLoopOptions struct { trackTimestampsStaleness bool interval time.Duration timeout time.Duration - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme + fallbackScrapeProtocol string mrc []*relabel.Config cache *scrapeCache @@ -126,9 +127,9 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop". 
type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) @@ -149,7 +150,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger: logger, metrics: metrics, httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. @@ -162,7 +162,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed return newScrapeLoop( ctx, opts.scraper, - log.With(logger, "target", opts.target), + logger.With("target", opts.target), buffers, func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) @@ -181,7 +181,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.labelLimits, opts.interval, opts.timeout, - opts.scrapeClassicHistograms, + opts.alwaysScrapeClassicHist, + opts.convertClassicHistToNHCB, options.EnableNativeHistogramsIngestion, options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, @@ -191,6 +192,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed metrics, options.skipOffsetting, opts.validationScheme, + opts.fallbackScrapeProtocol, ) } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -221,11 +223,11 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } -func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { +func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { sp.scrapeFailureLoggerMtx.Lock() defer sp.scrapeFailureLoggerMtx.Unlock() if l != nil { - l = log.With(l, "job_name", sp.config.JobName) + l.With("job_name", sp.config.JobName) } sp.scrapeFailureLogger = l @@ -236,7 +238,7 @@ func (sp *scrapePool) SetScrapeFailureLogger(l log.Logger) { } } -func (sp *scrapePool) getScrapeFailureLogger() log.Logger { +func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger { sp.scrapeFailureLoggerMtx.RLock() defer sp.scrapeFailureLoggerMtx.RUnlock() return sp.scrapeFailureLogger @@ -327,11 +329,12 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs + fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -373,6 +376,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { interval: interval, timeout: timeout, validationScheme: validationScheme, + 
fallbackScrapeProtocol: fallbackScrapeProtocol, }) ) if err != nil { @@ -429,9 +433,9 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.droppedTargets = []*Target{} sp.droppedTargetsCount = 0 for _, tg := range tgs { - targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) + targets, failures := TargetsFromGroup(tg, sp.config, targets, lb) for _, err := range failures { - level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) + sp.logger.Error("Creating target failed", "err", err) } sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) for _, t := range targets { @@ -482,12 +486,14 @@ func (sp *scrapePool) sync(targets []*Target) { enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs - scrapeClassicHistograms = sp.config.ScrapeClassicHistograms + fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() + alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -523,7 +529,10 @@ func (sp *scrapePool) sync(targets []*Target) { mrc: mrc, interval: interval, timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, + validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, }) if err != nil { l.setForcedError(err) @@ -851,7 +860,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) - setScrapeFailureLogger(log.Logger) + setScrapeFailureLogger(*logging.JSONFileLogger) stop() getCache() *scrapeCache disableEndOfRunStalenessMarkers() @@ -866,8 +875,8 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper - l log.Logger - scrapeFailureLogger log.Logger + l *slog.Logger + scrapeFailureLogger *logging.JSONFileLogger scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int @@ -884,8 +893,10 @@ type scrapeLoop struct { labelLimits *labelLimits interval time.Duration timeout time.Duration - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme + fallbackScrapeProtocol string // Feature flagged options. 
enableNativeHistogramIngestion bool @@ -1167,7 +1178,7 @@ func (c *scrapeCache) LengthMetadata() int { func newScrapeLoop(ctx context.Context, sc scraper, - l log.Logger, + l *slog.Logger, buffers *pool.Pool, sampleMutator labelsMutator, reportSampleMutator labelsMutator, @@ -1184,7 +1195,8 @@ func newScrapeLoop(ctx context.Context, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, - scrapeClassicHistograms bool, + alwaysScrapeClassicHist bool, + convertClassicHistToNHCB bool, enableNativeHistogramIngestion bool, enableCTZeroIngestion bool, reportExtraMetrics bool, @@ -1194,9 +1206,10 @@ func newScrapeLoop(ctx context.Context, metrics *scrapeMetrics, skipOffsetting bool, validationScheme model.ValidationScheme, + fallbackScrapeProtocol string, ) *scrapeLoop { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if buffers == nil { buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) @@ -1238,7 +1251,8 @@ func newScrapeLoop(ctx context.Context, labelLimits: labelLimits, interval: interval, timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, enableNativeHistogramIngestion: enableNativeHistogramIngestion, enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, @@ -1246,17 +1260,18 @@ func newScrapeLoop(ctx context.Context, metrics: metrics, skipOffsetting: skipOffsetting, validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, } sl.ctx, sl.cancel = context.WithCancel(ctx) return sl } -func (sl *scrapeLoop) setScrapeFailureLogger(l log.Logger) { +func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) { sl.scrapeFailureLoggerMtx.Lock() defer sl.scrapeFailureLoggerMtx.Unlock() if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { - l = log.With(l, "target", ts.String()) + l.With("target", ts.String()) } sl.scrapeFailureLogger = l } @@ -1354,13 +1369,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } err = app.Commit() if err != nil { - level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + sl.l.Error("Scrape commit failed", "err", err) } }() defer func() { if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil { - level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + sl.l.Warn("Appending scrape report failed", "err", err) } }() @@ -1370,7 +1385,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } if errc != nil { errc <- forcedErr @@ -1403,10 +1418,10 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } bytesRead = len(b) } else { - level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + sl.l.Debug("Scrape failed", "err", scrapeErr) sl.scrapeFailureLoggerMtx.RLock() if sl.scrapeFailureLogger != nil { - sl.scrapeFailureLogger.Log("err", scrapeErr) + sl.scrapeFailureLogger.Error("err", scrapeErr) } sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { @@ -1423,13 +1438,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if appErr != nil { app.Rollback() app = 
sl.appender(sl.appenderCtx) - level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) + sl.l.Debug("Append failed", "err", appErr) // The append failed, probably due to a parse error or sample limit. // Call sl.append again with an empty scrape to trigger stale markers. if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } } @@ -1502,16 +1517,16 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int } err = app.Commit() if err != nil { - level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err) + sl.l.Warn("Stale commit failed", "err", err) } }() if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Stale append failed", "err", err) + sl.l.Warn("Stale append failed", "err", err) } if err = sl.reportStale(app, staleTime); err != nil { - level.Warn(sl.l).Log("msg", "Stale report failed", "err", err) + sl.l.Warn("Stale report failed", "err", err) } } @@ -1538,11 +1553,24 @@ type appendErrors struct { } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { - p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) + p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable) + if p == nil { + sl.l.Error( + "Failed to determine correct type of scrape target.", + "content_type", contentType, + "fallback_media_type", sl.fallbackScrapeProtocol, + "err", err, + ) + return + } + if sl.convertClassicHistToNHCB { + p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist) + } if err != nil { - level.Debug(sl.l).Log( - "msg", "Invalid content type on scrape, using prometheus parser as fallback.", + sl.l.Debug( + "Invalid content type on scrape, using fallback setting.", "content_type", contentType, + "fallback_media_type", sl.fallbackScrapeProtocol, "err", err, ) } @@ -1558,7 +1586,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, metadataChanged bool ) - exemplars := make([]exemplar.Exemplar, 1) + exemplars := make([]exemplar.Exemplar, 0, 1) // updateMetadata updates the current iteration's metadata object and the // metadataChanged value if we have metadata in the scrape cache AND the @@ -1700,11 +1728,19 @@ loop: } else { if sl.enableCTZeroIngestion { if ctMs := p.CreatedTimestamp(); ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if isHistogram && sl.enableNativeHistogramIngestion { + if h != nil { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh) + } + } else { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + } if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. 
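For orientation, the parser construction earlier in this hunk is the behavioural core of the scrape changes: the configured fallback media type is consulted only when the scraped Content-Type is missing or invalid, and classic histograms can optionally be re-encoded as native histograms with custom buckets (NHCB). A condensed sketch of that flow, reusing the calls from the hunk above (receiver fields and the scrape body b are as in scrapeLoop.append):

    p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable)
    if p == nil {
        // Neither the response Content-Type nor the configured fallback yielded a usable parser.
        return total, added, seriesAdded, err
    }
    if sl.convertClassicHistToNHCB {
        // Wrap the parser so classic histogram series are emitted as NHCB native histograms.
        p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist)
    }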
- level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + sl.l.Debug("Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) } } } @@ -1729,7 +1765,7 @@ loop: sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { if !errors.Is(err, storage.ErrNotFound) { - level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) + sl.l.Debug("Unexpected error", "series", string(met), "err", err) } break loop } @@ -1781,21 +1817,21 @@ loop: outOfOrderExemplars++ default: // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. - level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) + sl.l.Debug("Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } } if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { // Only report out of order exemplars if all are out of order, otherwise this was a partial update // to some existing set of exemplars. appErrs.numExemplarOutOfOrder += outOfOrderExemplars - level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) + sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } if sl.appendMetadataToWAL && metadataChanged { if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { // No need to fail the scrape on errors appending metadata. - level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) } } } @@ -1814,21 +1850,23 @@ loop: sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc() } if appErrs.numOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) } if appErrs.numDuplicates > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) + sl.l.Warn("Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) } if appErrs.numOutOfBounds > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) + sl.l.Warn("Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) } if appErrs.numExemplarOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } if err == nil { sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. 
+ app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) + app.SetOptions(nil) switch { case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if a target @@ -1851,17 +1889,17 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke return false, storage.ErrNotFound case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ - level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) + sl.l.Debug("Out of order sample", "series", string(met)) sl.metrics.targetScrapeSampleOutOfOrder.Inc() return false, nil case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): appErrs.numDuplicates++ - level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) + sl.l.Debug("Duplicate sample for timestamp", "series", string(met)) sl.metrics.targetScrapeSampleDuplicate.Inc() return false, nil case errors.Is(err, storage.ErrOutOfBounds): appErrs.numOutOfBounds++ - level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) + sl.l.Debug("Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil case errors.Is(err, errSampleLimit): @@ -1934,7 +1972,7 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { ts := timestamp.FromTime(start) - + app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) stale := math.Float64frombits(value.StaleNaN) b := labels.NewBuilder(labels.EmptyLabels()) diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 9ef4471fbd1..06d4737ff90 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "hash/fnv" - "net" "net/url" "strings" "sync" @@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -441,8 +440,8 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // Encode scrape query parameters as labels. for k, v := range cfg.Params { - if len(v) > 0 { - lb.Set(model.ParamLabelPrefix+k, v[0]) + if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" { + lb.Set(name, v[0]) } } @@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") } - // addPort checks whether we should add a default port to the address. 
- // If the address is not valid, we don't append a port either. - addPort := func(s string) (string, string, bool) { - // If we can split, a port exists and we don't have to add one. - if host, port, err := net.SplitHostPort(s); err == nil { - return host, port, false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return "", "", err == nil - } - addr := lb.Get(model.AddressLabel) - scheme := lb.Get(model.SchemeLabel) - host, port, add := addPort(addr) - // If it's an address with no trailing port, infer it based on the used scheme - // unless the no-default-scrape-port feature flag is present. - if !noDefaultPort && add { - // Addresses reaching this point are already wrapped in [] if necessary. - switch scheme { - case "http", "": - addr += ":80" - case "https": - addr += ":443" - default: - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - - if noDefaultPort { - // If it's an address with a trailing default port and the - // no-default-scrape-port flag is present, remove the port. - switch port { - case "80": - if scheme == "http" { - lb.Set(model.AddressLabel, host) - } - case "443": - if scheme == "https" { - lb.Set(model.AddressLabel, host) - } - } - } if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return labels.EmptyLabels(), labels.EmptyLabels(), err @@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // TargetsFromGroup builds targets based on the given TargetGroup and config. -func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) { targets = targets[:0] failures := []error{} @@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault } } - lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) + lset, origLabels, err := PopulateLabels(lb, cfg) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index 4c9b004ab66..e847c10e61a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -253,9 +253,9 @@ type sampleRing struct { delta int64 // Lookback buffers. We use iBuf for mixed samples, but one of the three - // concrete ones for homogenous samples. (Only one of the four bufs is + // concrete ones for homogeneous samples. (Only one of the four bufs is // allowed to be populated!) This avoids the overhead of the interface - // wrapper for the happy (and by far most common) case of homogenous + // wrapper for the happy (and by far most common) case of homogeneous // samples. iBuf []chunks.Sample fBuf []fSample @@ -280,7 +280,7 @@ const ( fhBuf ) -// newSampleRing creates a new sampleRing. If you do not know the prefereed +// newSampleRing creates a new sampleRing. If you do not know the preferred // value type yet, use a size of 0 (in which case the provided typ doesn't // matter). 
On the first add, a buffer of size 16 will be allocated with the // preferred type being the type of the first added sample. @@ -626,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample { return buf } -// addF adds an hSample to a (specialized) hSample buffer. +// addH adds an hSample to a (specialized) hSample buffer. func addH(s hSample, buf []hSample, r *sampleRing) []hSample { l := len(buf) // Grow the ring buffer if it fits no more elements. diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index e52342bc7ed..4d076788a7c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -15,9 +15,8 @@ package storage import ( "context" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -28,7 +27,7 @@ import ( ) type fanout struct { - logger log.Logger + logger *slog.Logger primary Storage secondaries []Storage @@ -43,7 +42,7 @@ type fanout struct { // and the error from the secondary querier will be returned as a warning. // // NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort. -func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage { +func NewFanout(logger *slog.Logger, primary Storage, secondaries ...Storage) Storage { return &fanout{ logger: logger, primary: primary, @@ -142,12 +141,22 @@ func (f *fanout) Close() error { // fanoutAppender implements Appender. type fanoutAppender struct { - logger log.Logger + logger *slog.Logger primary Appender secondaries []Appender } +// SetOptions propagates the hints to both primary and secondary appenders. 
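Because storage.Appender now also requires SetOptions, and HistogramAppender gains AppendHistogramCTZeroSample (see the interface.go hunk further down), every downstream Appender implementation has to add these methods; the fanout implementation below simply forwards them to the primary and secondary appenders. A minimal no-op sketch for an implementation that has nothing useful to do with either call (exampleAppender is an illustrative name, not part of this change):

    // SetOptions is required by storage.Appender; this implementation ignores the hints.
    func (a *exampleAppender) SetOptions(_ *storage.AppendOptions) {}

    // AppendHistogramCTZeroSample is required by storage.HistogramAppender; this
    // implementation accepts and discards the synthetic created-timestamp zero sample.
    func (a *exampleAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
        return ref, nil
    }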
+func (f *fanoutAppender) SetOptions(opts *AppendOptions) { + if f.primary != nil { + f.primary.SetOptions(opts) + } + for _, appender := range f.secondaries { + appender.SetOptions(opts) + } +} + func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { ref, err := f.primary.Append(ref, l, t, v) if err != nil { @@ -190,6 +199,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64 return ref, nil } +func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { + ref, err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { @@ -226,7 +249,7 @@ func (f *fanoutAppender) Commit() (err error) { err = appender.Commit() } else { if rollbackErr := appender.Rollback(); rollbackErr != nil { - level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr) + f.logger.Error("Squashed rollback error on commit", "err", rollbackErr) } } } @@ -242,7 +265,7 @@ func (f *fanoutAppender) Rollback() (err error) { case err == nil: err = rollbackErr case rollbackErr != nil: - level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) + f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr) } } return nil diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 8035caaa6d6..0634acb092d 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -50,7 +50,8 @@ var ( // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. - ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -113,6 +114,8 @@ type Querier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet @@ -151,6 +154,8 @@ type ChunkQuerier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. 
Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet @@ -158,7 +163,7 @@ type ChunkQuerier interface { // LabelQuerier provides querying access over labels. type LabelQuerier interface { - // LabelValues returns all potential values for a label name. + // LabelValues returns all potential values for a label name in sorted order. // It is not safe to use the strings beyond the lifetime of the querier. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. @@ -238,6 +243,10 @@ func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) { return f(mint, maxt) } +type AppendOptions struct { + DiscardOutOfOrder bool +} + // Appender provides batched appends against a storage. // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // @@ -266,6 +275,10 @@ type Appender interface { // Appender has to be discarded after rollback. Rollback() error + // SetOptions configures the appender with specific append options such as + // discarding out-of-order samples even if out-of-order is enabled in the TSDB. + SetOptions(opts *AppendOptions) + ExemplarAppender HistogramAppender MetadataUpdater @@ -313,6 +326,20 @@ type HistogramAppender interface { // pointer. AppendHistogram won't mutate the histogram, but in turn // depends on the caller to not mutate it either. AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) + // AppendHistogramCTZeroSample adds synthetic zero sample for the given ct timestamp, + // which will be associated with given series, labels and the incoming + // sample's t (timestamp). AppendHistogramCTZeroSample returns error if zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // incoming sample (sample has priority). + // + // AppendHistogramCTZeroSample has to be called before the corresponding histogram AppendHistogram. + // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendHistogramCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // MetadataUpdater provides an interface for associating metadata to stored series. diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 318f22696ee..3144a0f648e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -153,13 +153,18 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints ) // Schedule all Selects for all queriers we know about. for _, querier := range q.queriers { + // copy the matchers as some queriers may alter the slice. 
+ // See https://github.com/prometheus/prometheus/issues/14723 + matchersCopy := make([]*labels.Matcher, len(matchers)) + copy(matchersCopy, matchers) + wg.Add(1) - go func(qr genericQuerier) { + go func(qr genericQuerier, m []*labels.Matcher) { defer wg.Done() // We need to sort for NewMergeSeriesSet to work. - seriesSetChan <- qr.Select(ctx, true, hints, matchers...) - }(querier) + seriesSetChan <- qr.Select(ctx, true, hints, m...) + }(querier, matchersCopy) } go func() { wg.Wait() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 62218cfba91..23775122e56 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "net/http" + "net/http/httptrace" "strconv" "strings" "time" @@ -31,6 +32,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" + "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" @@ -213,8 +215,11 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { if conf.WriteProtoMsg != "" { writeProtoMsg = conf.WriteProtoMsg } - - httpClient.Transport = otelhttp.NewTransport(t) + httpClient.Transport = otelhttp.NewTransport( + t, + otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { + return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) + })) return &Client{ remoteName: name, urlString: conf.URL.String(), diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index fdcd668f565..9306dcb4c28 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -16,11 +16,11 @@ package remote import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/scrape" ) @@ -44,7 +44,7 @@ func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. type MetadataWatcher struct { name string - logger log.Logger + logger *slog.Logger managerGetter ReadyScrapeManager manager Watchable @@ -62,9 +62,9 @@ type MetadataWatcher struct { } // NewMetadataWatcher builds a new MetadataWatcher. -func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { +func NewMetadataWatcher(l *slog.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if mg == nil { @@ -87,7 +87,7 @@ func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w Meta // Start the MetadataWatcher. 
func (mw *MetadataWatcher) Start() { - level.Info(mw.logger).Log("msg", "Starting scraped metadata watcher") + mw.logger.Info("Starting scraped metadata watcher") mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background()) mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx) go mw.loop() @@ -95,15 +95,15 @@ func (mw *MetadataWatcher) Start() { // Stop the MetadataWatcher. func (mw *MetadataWatcher) Stop() { - level.Info(mw.logger).Log("msg", "Stopping metadata watcher...") - defer level.Info(mw.logger).Log("msg", "Scraped metadata watcher stopped") + mw.logger.Info("Stopping metadata watcher...") + defer mw.logger.Info("Scraped metadata watcher stopped") mw.softShutdownCancel() select { case <-mw.done: return case <-time.After(mw.deadline): - level.Error(mw.logger).Log("msg", "Failed to flush metadata") + mw.logger.Error("Failed to flush metadata") } mw.hardShutdownCancel() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go index a112b9bbce2..c22c7613203 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -49,7 +49,7 @@ func NormalizeLabel(label string) string { // Return '_' for anything non-alphanumeric. func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { + if unicode.IsLower(r) || unicode.IsUpper(r) || unicode.IsDigit(r) { return r } return '_' diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go index 0f472b80a09..36b647f510a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -237,11 +237,13 @@ func removeSuffix(tokens []string, suffix string) []string { // Clean up specified string so it's Prometheus compliant func CleanUpString(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) }), "_") } func RemovePromForbiddenRunes(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { + return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) && r != '_' && r != ':' + }), "_") } // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 84cd309d6b9..08d72c52f44 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -21,6 +21,7 @@ import 
( "encoding/hex" "fmt" "log" + "log/slog" "math" "slices" "sort" @@ -28,8 +29,6 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - gokitlog "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -245,7 +244,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, logger gokitlog.Logger) error { + resource pcommon.Resource, settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -337,7 +336,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo labels := createLabels(baseName+createdSuffix, baseLabels) c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") + logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") } return nil @@ -359,9 +358,17 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, exemplarRunes := 0 promExemplar := prompb.Exemplar{ - Value: exemplar.DoubleValue(), Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), } + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar.Value = float64(exemplar.IntValue()) + case pmetric.ExemplarValueTypeDouble: + promExemplar.Value = exemplar.DoubleValue() + default: + return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) @@ -443,7 +450,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, logger gokitlog.Logger) error { + settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -500,7 +507,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") + logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") } return nil @@ -584,7 +591,7 @@ const defaultIntervalForStartTimestamps = int64(300_000) // make use of its direct support fort Created Timestamps instead. 
// See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles // resets for cumulative metrics. -func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger gokitlog.Logger) { +func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger *slog.Logger) { if !settings.EnableCreatedTimestampZeroIngestion { return } @@ -606,7 +613,7 @@ func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb return } - level.Debug(logger).Log("msg", "adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) + logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) // See https://github.com/prometheus/prometheus/issues/14600 for context. c.addSample(&prompb.Sample{Timestamp: startTs}, labels) @@ -682,10 +689,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta return } - ts := convertTimeStamp(timestamp) sample := &prompb.Sample{ - Value: float64(1), - Timestamp: ts, + Value: float64(1), + // convert ns to ms + Timestamp: convertTimeStamp(timestamp), } converter.addSample(sample, labels) } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 5bd7fc7410a..8349d4f9070 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -60,7 +60,6 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont promName, ) ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index a74caabc581..a7f41a63164 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "log/slog" "sort" "strings" "time" - "github.com/go-kit/log" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" @@ -67,7 +67,7 @@ func NewPrometheusConverter() *PrometheusConverter { } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. 
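FromMetrics below is one of several entry points whose logger parameter moves from go-kit's log.Logger to the standard library's *slog.Logger; callers can build a compatible logger with github.com/prometheus/common/promslog. A small sketch of the updated call site (ctx, md and settings are assumed to already exist at the caller):

    logger := promslog.New(&promslog.Config{}) // *slog.Logger replaces the former go-kit logger
    converter := prometheusremotewrite.NewPrometheusConverter()
    if _, err := converter.FromMetrics(ctx, md, settings, logger); err != nil {
        logger.Error("OTLP to remote write conversion failed", "err", err)
    }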
-func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger log.Logger) (annots annotations.Annotations, errs error) { +func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index af0c7a478b7..30385f8fd2c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -18,10 +18,9 @@ package prometheusremotewrite import ( "context" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -47,9 +46,9 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data model.MetricNameLabel, name, ) - timestamp := convertTimeStamp(pt.Timestamp()) sample := &prompb.Sample{ - Timestamp: timestamp, + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), } switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: @@ -67,7 +66,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger log.Logger) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -85,8 +84,8 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo model.MetricNameLabel, name, ) - sample := &prompb.Sample{ + // convert ns to ms Timestamp: timestamp, } switch pt.ValueType() { @@ -127,7 +126,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo } c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") + logger.Debug("addSumNumberDataPoints", "labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") } return nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index b1c8997268b..9f27c333a6d 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "strconv" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" 
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" @@ -407,7 +407,7 @@ type QueueManager struct { reshardDisableStartTimestamp atomic.Int64 // Time that reshard was disabled. reshardDisableEndTimestamp atomic.Int64 // Time that reshard is disabled until. - logger log.Logger + logger *slog.Logger flushDeadline time.Duration cfg config.QueueConfig mcfg config.MetadataConfig @@ -454,7 +454,7 @@ func NewQueueManager( metrics *queueManagerMetrics, watcherMetrics *wlog.WatcherMetrics, readerMetrics *wlog.LiveReaderMetrics, - logger log.Logger, + logger *slog.Logger, dir string, samplesIn *ewmaRate, cfg config.QueueConfig, @@ -471,7 +471,7 @@ func NewQueueManager( protoMsg config.RemoteWriteProtoMsg, ) *QueueManager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } // Copy externalLabels into a slice, which we need for processExternalLabels. @@ -480,7 +480,7 @@ func NewQueueManager( extLabelsSlice = append(extLabelsSlice, l) }) - logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) + logger = logger.With(remoteName, client.Name(), endpoint, client.Endpoint()) t := &QueueManager{ logger: logger, flushDeadline: flushDeadline, @@ -526,7 +526,7 @@ func NewQueueManager( // ships them alongside series. If both mechanisms are set, the new one // takes precedence by implicitly disabling the older one. if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 { - level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") + logger.Warn("usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") t.mcfg.Send = false } @@ -567,7 +567,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) - level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) + t.logger.Error("non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) } } } @@ -706,7 +706,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) + t.logger.Info("Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) t.metrics.droppedSamplesTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedSamplesTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -769,7 +769,7 @@ outer: // Track dropped exemplars in the same EWMA for sharding calc. 
t.dataDropped.incr(1) if _, ok := t.droppedSeries[e.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + t.logger.Info("Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) t.metrics.droppedExemplarsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedExemplarsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -825,7 +825,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -880,7 +880,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -944,8 +944,8 @@ func (t *QueueManager) Start() { // Stop stops sending samples to the remote storage and waits for pending // sends to complete. func (t *QueueManager) Stop() { - level.Info(t.logger).Log("msg", "Stopping remote storage...") - defer level.Info(t.logger).Log("msg", "Remote storage stopped.") + t.logger.Info("Stopping remote storage...") + defer t.logger.Info("Remote storage stopped.") close(t.quit) t.wg.Wait() @@ -1093,10 +1093,10 @@ func (t *QueueManager) updateShardsLoop() { // to stay close to shardUpdateDuration. 
select { case t.reshardChan <- desiredShards: - level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", desiredShards) + t.logger.Info("Remote storage resharding", "from", t.numShards, "to", desiredShards) t.numShards = desiredShards default: - level.Info(t.logger).Log("msg", "Currently resharding, skipping.") + t.logger.Info("Currently resharding, skipping.") } case <-t.quit: return @@ -1114,14 +1114,14 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool { minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix() lsts := t.lastSendTimestamp.Load() if lsts < minSendTimestamp { - level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) + t.logger.Warn("Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) return false } if disableTimestamp := t.reshardDisableEndTimestamp.Load(); time.Now().Unix() < disableTimestamp { disabledAt := time.Unix(t.reshardDisableStartTimestamp.Load(), 0) disabledFor := time.Until(time.Unix(disableTimestamp, 0)) - level.Warn(t.logger).Log("msg", "Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) + t.logger.Warn("Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) return false } return true @@ -1164,7 +1164,7 @@ func (t *QueueManager) calculateDesiredShards() int { desiredShards = timePerSample * (dataInRate*dataKeptRatio + backlogCatchup) ) t.metrics.desiredNumShards.Set(desiredShards) - level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", + t.logger.Debug("QueueManager.calculateDesiredShards", "dataInRate", dataInRate, "dataOutRate", dataOutRate, "dataKeptRatio", dataKeptRatio, @@ -1182,7 +1182,7 @@ func (t *QueueManager) calculateDesiredShards() int { lowerBound = float64(t.numShards) * (1. - shardToleranceFraction) upperBound = float64(t.numShards) * (1. + shardToleranceFraction) ) - level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop", + t.logger.Debug("QueueManager.updateShardsLoop", "lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound) desiredShards = math.Ceil(desiredShards) // Round up to be on the safe side. @@ -1193,7 +1193,7 @@ func (t *QueueManager) calculateDesiredShards() int { numShards := int(desiredShards) // Do not downshard if we are more than ten seconds back. if numShards < t.numShards && delay > 10.0 { - level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind") + t.logger.Debug("Not downsharding due to being too far behind") return t.numShards } @@ -1321,7 +1321,7 @@ func (s *shards) stop() { // Log error for any dropped samples, exemplars, or histograms. 
logDroppedError := func(t string, counter atomic.Uint32) { if dropped := counter.Load(); dropped > 0 { - level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) + s.qm.logger.Error(fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) } } logDroppedError("samples", s.samplesDroppedOnHardShutdown) @@ -1564,7 +1564,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) n := nPendingSamples + nPendingExemplars + nPendingHistograms if timer { - level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, + s.qm.logger.Debug("runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) } _ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc) @@ -1691,9 +1691,9 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff)) } if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) + s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) } else if sampleDiff+exemplarDiff+histogramDiff > 0 { - level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) + s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) } // These counters are used to calculate the dynamic sharding, and as such @@ -2018,16 +2018,16 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt switch { case backoffErr.retryAfter > 0: sleepDuration = backoffErr.retryAfter - level.Info(t.logger).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration) + t.logger.Info("Retrying after duration specified by Retry-After header", "duration", sleepDuration) case backoffErr.retryAfter < 0: - level.Debug(t.logger).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism") + t.logger.Debug("retry-after cannot be in past, retrying using default backoff mechanism") } // We should never reshard for a recoverable error; increasing shards could // make the problem worse, particularly if we're getting rate limited. // // reshardDisableTimestamp holds the unix timestamp until which resharding - // is diableld. We'll update that timestamp if the period we were just told + // is disabled. We'll update that timestamp if the period we were just told // to sleep for is newer than the existing disabled timestamp. 
reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2) if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated { @@ -2047,7 +2047,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt // If we make it this far, we've encountered a recoverable error and will retry. onRetry() - level.Warn(t.logger).Log("msg", "Failed to send batch, retrying", "err", err) + t.logger.Warn("Failed to send batch, retrying", "err", err) backoff = sleepDuration * 2 @@ -2147,12 +2147,12 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } } -func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &prompb.WriteRequest{ @@ -2185,11 +2185,11 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada return compressed, highest, lowest, nil } -func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &writev2.Request{ diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index ffc64c9c3fb..8f2945f9740 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -16,13 +16,12 @@ package remote import ( "context" "errors" + "log/slog" "net/http" "slices" "strings" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -34,7 +33,7 @@ import ( ) type readHandler struct { - logger log.Logger + logger 
*slog.Logger queryable storage.SampleAndChunkQueryable config func() config.Config remoteReadSampleLimit int @@ -46,7 +45,7 @@ type readHandler struct { // NewReadHandler creates a http.Handler that accepts remote read requests and // writes them to the provided queryable. -func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { +func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { h := &readHandler{ logger: logger, queryable: queryable, @@ -140,7 +139,7 @@ func (h *readHandler) remoteReadSamples( } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on querier close", "err", err.Error()) + h.logger.Warn("Error on querier close", "err", err.Error()) } }() @@ -163,7 +162,7 @@ func (h *readHandler) remoteReadSamples( return err } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on remote read query", "err", w.Error()) + h.logger.Warn("Warnings on remote read query", "err", w.Error()) } for _, ts := range resp.Results[i].Timeseries { ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels) @@ -208,7 +207,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + h.logger.Warn("Error on chunk querier close", "err", err.Error()) } }() @@ -239,7 +238,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error()) + h.logger.Warn("Warnings on chunked remote read query", "warnings", w.Error()) } return nil }(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index 05634f1798f..14c3c87d936 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -18,12 +18,13 @@ import ( "crypto/md5" "encoding/hex" "fmt" + "log/slog" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -51,8 +52,9 @@ type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { - logger *logging.Deduper - mtx sync.Mutex + deduper *logging.Deduper + logger *slog.Logger + mtx sync.Mutex rws *WriteStorage @@ -62,14 +64,16 @@ type Storage struct { } // NewStorage returns a remote.Storage. 
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { +func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } - logger := logging.Dedupe(l, 1*time.Minute) + deduper := logging.Dedupe(l, 1*time.Minute) + logger := slog.New(deduper) s := &Storage{ logger: logger, + deduper: deduper, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL) @@ -196,7 +200,7 @@ func (s *Storage) LowestSentTimestamp() int64 { // Close the background processing of the storage queues. func (s *Storage) Close() error { - s.logger.Stop() + s.deduper.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 3d2f1fdfcdb..639f3445209 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -17,13 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -57,7 +58,7 @@ var ( // WriteStorage represents all the remote write storage. type WriteStorage struct { - logger log.Logger + logger *slog.Logger reg prometheus.Registerer mtx sync.Mutex @@ -78,9 +79,9 @@ type WriteStorage struct { } // NewWriteStorage creates and runs a WriteStorage. -func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { +func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } rws := &WriteStorage{ queues: make(map[string]*QueueManager), @@ -277,6 +278,7 @@ func (rws *WriteStorage) Close() error { type timestampTracker struct { writeStorage *WriteStorage + appendOptions *storage.AppendOptions samples int64 exemplars int64 histograms int64 @@ -284,6 +286,10 @@ type timestampTracker struct { highestRecvTimestamp *maxTimestamp } +func (t *timestampTracker) SetOptions(opts *storage.AppendOptions) { + t.appendOptions = opts +} + // Append implements storage.Appender. func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { t.samples++ @@ -306,14 +312,29 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, return 0, nil } -func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { - // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. - // UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now. 
+func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64) (storage.SeriesRef, error) { + t.samples++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } return 0, nil } -func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { - // AppendCTZeroSample is no-op for remote-write for now. +func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + t.histograms++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } + return 0, nil +} + +func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. + // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. return 0, nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 43c72a3571e..99e4392ff5e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -18,12 +18,11 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" @@ -42,7 +41,7 @@ import ( ) type writeHandler struct { - logger log.Logger + logger *slog.Logger appendable storage.Appendable samplesWithInvalidLabelsTotal prometheus.Counter @@ -58,7 +57,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. 
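The `timestampTracker` hunk above now accounts for created-timestamp (CT) zero samples: they are counted like ordinary samples and may advance the highest observed timestamp, but nothing is actually appended. A rough standalone sketch of that bookkeeping, using simplified types rather than the vendored `storage.Appender` interface:

```go
package main

import "fmt"

// tracker mirrors the bookkeeping of timestampTracker for CT zero samples:
// count the synthetic zero sample and move the high-water mark if needed.
type tracker struct {
	samples          int64
	highestTimestamp int64
}

func (t *tracker) appendCTZeroSample(ct int64) {
	t.samples++
	if ct > t.highestTimestamp {
		// A CT zero sample should normally be older than everything already
		// seen; the tracker records it either way rather than enforcing that.
		t.highestTimestamp = ct
	}
}

func main() {
	tr := &tracker{highestTimestamp: 1_700_000_000_000}
	tr.appendCTZeroSample(1_600_000_000_000) // older created timestamp: counted, high-water mark unchanged
	fmt.Println(tr.samples, tr.highestTimestamp)
}
```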
-func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -119,7 +118,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { msgType, err := h.parseProtoMsg(contentType) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } @@ -131,7 +130,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } return ret }()) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } @@ -142,14 +141,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default. } else if enc != string(SnappyBlockCompression) { err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } // Read the request body. body, err := io.ReadAll(r.Body) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -157,7 +156,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { decompressed, err := snappy.Decode(nil, body) if err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error()) + h.logger.Error("Error decompressing remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -169,7 +168,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req prompb.WriteRequest if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? 
- level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -180,7 +179,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v1 request", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -193,7 +192,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req writev2.Request if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -205,7 +204,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { if errHTTPCode/5 == 100 { // 5xx - level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v2 request", "err", err.Error()) } http.Error(w, err.Error(), errHTTPCode) return @@ -241,11 +240,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are // potentially written. Perhaps unify with fixed writeV2 implementation a bit. 
if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { - level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) + h.logger.Warn("Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ continue } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { - level.Warn(h.logger).Log("msg", "Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) + h.logger.Warn("Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) samplesWithInvalidLabels++ continue } @@ -261,10 +260,10 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err switch { case errors.Is(err, storage.ErrOutOfOrderExemplar): outOfOrderExemplarErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) default: // Since exemplar storage is still experimental, we don't fail the request on ingestion errors - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } } } @@ -276,7 +275,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } if outOfOrderExemplarErrs > 0 { - _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } if samplesWithInvalidLabels > 0 { h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -293,7 +292,7 @@ func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err } @@ -315,7 +314,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) } return err } @@ -345,7 +344,7 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ Wri // On 5xx, we always rollback, because we expect // sender to retry and TSDB is not idempotent. 
if rerr := app.Rollback(); rerr != nil { - level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) + h.logger.Error("writev2 rollback failed on retry-able error", "err", rerr) } return WriteResponseStats{}, errHTTPCode, err } @@ -407,7 +406,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrDuplicateSampleForTimestamp) || errors.Is(err, storage.ErrTooOldSample) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -432,7 +431,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -450,18 +449,18 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Handle append error. if errors.Is(err, storage.ErrOutOfOrderExemplar) { outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. - level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. // For now we keep the previously released flow (just error not debug leve) of dropping them without rollback and 5xx. - level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } m := ts.ToMetadata(req.Symbols) if _, err = app.UpdateMetadata(ref, ls, m); err != nil { - level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) + h.logger.Debug("error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead. 
samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar @@ -469,7 +468,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } if outOfOrderExemplarErrs > 0 { - level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -482,7 +481,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. -func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config, enableCTZeroIngestion bool, validIntervalCTZeroIngestion time.Duration) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config, enableCTZeroIngestion bool, validIntervalCTZeroIngestion time.Duration) http.Handler { rwHandler := &writeHandler{ logger: logger, appendable: appendable, @@ -498,7 +497,7 @@ func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, confi } type otlpWriteHandler struct { - logger log.Logger + logger *slog.Logger rwHandler *writeHandler configFunc func() config.Config enableCTZeroIngestion bool @@ -508,7 +507,7 @@ type otlpWriteHandler struct { func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { req, err := DecodeOTLPWriteRequest(r) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -523,11 +522,11 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ValidIntervalCreatedTimestampZeroIngestion: h.validIntervalCTZeroIngestion, }, h.logger) if err != nil { - level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) + h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) } ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { - level.Warn(h.logger).Log("msg", "Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ @@ -541,7 +540,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) + h.logger.Error("Error appending remote write", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 1ec331694a3..007564c2734 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -20,16 +20,17 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" 
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -286,7 +287,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { return &m, int64(len(b)), nil } -func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { +func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, error) { meta.Version = metaVersion1 // Make any changes to the file appear atomic. @@ -294,7 +295,7 @@ func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error tmp := path + ".tmp" defer func() { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() @@ -340,7 +341,7 @@ type Block struct { indexr IndexReader tombstones tombstones.Reader - logger log.Logger + logger *slog.Logger numBytesChunks int64 numBytesIndex int64 @@ -350,14 +351,14 @@ type Block struct { // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // to instantiate chunk structs. -func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { +func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { return OpenBlockWithOptions(logger, dir, pool, nil, DefaultPostingsForMatchersCacheTTL, DefaultPostingsForMatchersCacheMaxItems, DefaultPostingsForMatchersCacheMaxBytes, DefaultPostingsForMatchersCacheForce) } // OpenBlockWithOptions is like OpenBlock but allows to pass a cache provider and sharding function. -func OpenBlockWithOptions(logger log.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (pb *Block, err error) { +func OpenBlockWithOptions(logger *slog.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (pb *Block, err error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var closers []io.Closer defer func() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 232ec2b9148..63f82e28df0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -17,11 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "os" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/timestamp" @@ -31,7 +30,7 @@ import ( // BlockWriter is a block writer that allows appending and flushing series to disk. type BlockWriter struct { - logger log.Logger + logger *slog.Logger destinationDir string head *Head @@ -50,7 +49,7 @@ var ErrNoSeriesAppended = errors.New("no series appended, aborting") // contains anything at all. It is the caller's responsibility to // ensure that the resulting blocks do not overlap etc. // Writer ensures the block flush is atomic (via rename). 
-func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWriter, error) { +func NewBlockWriter(logger *slog.Logger, dir string, blockSize int64) (*BlockWriter, error) { w := &BlockWriter{ logger: logger, destinationDir: dir, @@ -95,7 +94,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. maxt := w.head.MaxTime() + 1 - level.Info(w.logger).Log("msg", "flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) + w.logger.Info("flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) compactor, err := NewLeveledCompactor(ctx, nil, @@ -121,7 +120,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { func (w *BlockWriter) Close() error { defer func() { if err := os.RemoveAll(w.chunkDir); err != nil { - level.Error(w.logger).Log("msg", "error in deleting BlockWriter files", "err", err) + w.logger.Error("error in deleting BlockWriter files", "err", err) } }() return w.head.Close() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go index 8cc59f3ea76..6e01798f720 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go @@ -86,8 +86,8 @@ func (b *bstream) writeBit(bit bit) { func (b *bstream) writeByte(byt byte) { if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 + b.stream = append(b.stream, byt) + return } i := len(b.stream) - 1 @@ -95,10 +95,8 @@ // Complete the last byte with the leftmost b.count bits from byt. b.stream[i] |= byt >> (8 - b.count) - b.stream = append(b.stream, 0) - i++ // Write the remainder, if any. - b.stream[i] = byt << b.count + b.stream = append(b.stream, byt<<b.count) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -191,8 +191,8 @@ func (a *xorAppender) Append(t int64, v float64) { case dod == 0: a.b.writeBit(zero) case bitRange(dod, 14): - a.b.writeBits(0b10, 2) - a.b.writeBits(uint64(dod), 14) + a.b.writeByte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1))) // 0b10 size code combined with 6 bits of dod. + a.b.writeByte(uint8(dod)) // Bottom 8 bits of dod. case bitRange(dod, 17): a.b.writeBits(0b110, 3) a.b.writeBits(uint64(dod), 17) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go index 86bd60381f8..19ff19ce136 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go @@ -24,7 +24,7 @@ import ( ) const ( - // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again. + // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again. chunkRefMapShrinkThreshold = 1000 // Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
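The chunkenc hunks above replace two `writeBits` calls with two single-byte writes for the common 14-bit delta-of-delta (dod) case, which the reworked `writeByte` can append without extra bookkeeping. A small sketch of how the 2-bit size code `0b10` and the 14 dod bits pack into exactly two bytes (illustrative helper, not the vendored API):

```go
package main

import "fmt"

// encodeDod14 packs the 2-bit size code 0b10 and a 14-bit delta-of-delta
// into two bytes, matching the layout written by the two writeByte calls.
func encodeDod14(dod int64) [2]byte {
	first := byte(0b10<<6) | (byte(dod>>8) & (1<<6 - 1)) // size code plus the top 6 bits of dod
	second := byte(dod)                                  // bottom 8 bits of dod
	return [2]byte{first, second}
}

func main() {
	b := encodeDod14(300)
	fmt.Printf("%08b %08b\n", b[0], b[1]) // 10000001 00101100
}
```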
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 9b57f28edd2..c176c804839 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -19,16 +19,16 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "golang.org/x/sync/semaphore" @@ -88,7 +88,7 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. type LeveledCompactor struct { metrics *CompactorMetrics - logger log.Logger + logger *slog.Logger ranges []int64 chunkPool chunkenc.Pool ctx context.Context @@ -176,7 +176,7 @@ type LeveledCompactorOptions struct { EnableOverlappingCompaction bool } -func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, MergeFunc: mergeFunc, @@ -184,14 +184,14 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register }) } -func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MergeFunc: mergeFunc, EnableOverlappingCompaction: true, }) } -func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { +func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, fmt.Errorf("at least one range must be provided") } @@ -199,7 +199,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer pool = chunkenc.NewPool() } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } mergeFunc := opts.MergeFunc if mergeFunc == nil { @@ -550,8 +550,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, meta := outBlocks[ix].meta if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "compact blocks resulted in empty block", + c.logger.Info( + "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), @@ -561,8 +561,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, allOutputBlocksAreEmpty = false ulids[ix] = 
outBlocks[ix].meta.ULID - level.Info(c.logger).Log( - "msg", "compact blocks", + c.logger.Info( + "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime, @@ -580,8 +580,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, b.meta.Compaction.Deletable = true n, err := writeMetaFile(c.logger, b.dir, &b.meta) if err != nil { - level.Error(c.logger).Log( - "msg", "Failed to write 'Deletable' to meta file after compaction", + c.logger.Error( + "Failed to write 'Deletable' to meta file after compaction", "ulid", b.meta.ULID, ) } @@ -680,12 +680,12 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s for _, ob := range obs { if ob.tmpDir != "" { if removeErr := os.RemoveAll(ob.tmpDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) } } if ob.blockDir != "" { if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) } } } @@ -700,8 +700,8 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s meta := outBlocks[ix][jx].meta if meta.Stats.NumSamples != 0 { noOOOBlock = false - level.Info(c.logger).Log( - "msg", "compact ooo head", + c.logger.Info( + "compact ooo head", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -716,8 +716,8 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s } if noOOOBlock { - level.Info(c.logger).Log( - "msg", "compact ooo head resulted in no blocks", + c.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil @@ -754,8 +754,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b } if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "write block resulted in empty block", + c.logger.Info( + "write block resulted in empty block", "mint", meta.MinTime, "maxt", meta.MaxTime, "duration", time.Since(start), @@ -763,8 +763,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b return nil, nil } - level.Info(c.logger).Log( - "msg", "write block", + c.logger.Info( + "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -804,7 +804,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop if ob.tmpDir != "" { // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. if removeErr := os.RemoveAll(ob.tmpDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) } } @@ -814,7 +814,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop if err != nil && ob.blockDir != "" { // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. 
if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) } } } @@ -939,7 +939,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop return nil } -func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Logger) { +func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger *slog.Logger) { if len(chks) <= 1 { return } @@ -955,7 +955,6 @@ func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Lo // Looks like the chunk is out of order. logValues := []any{ - "msg", "found out-of-order chunk when compacting", "num_chunks_for_series", len(chks), "index", i, "labels", lbls.String(), @@ -983,7 +982,7 @@ func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Lo ) } - level.Warn(logger).Log(logValues...) + logger.Warn("found out-of-order chunk when compacting", logValues...) } } @@ -992,7 +991,7 @@ func timeFromMillis(ms int64) time.Time { } type BlockPopulator interface { - PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) error + PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) error } // IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader. @@ -1015,7 +1014,7 @@ type DefaultBlockPopulator struct{} // It expects sorted blocks input by mint. // If there is more than 1 output block, each output block will only contain series that hash into its shard // (based on total number of output blocks). 
-func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block(s) from no readers") } @@ -1049,7 +1048,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if i > 0 && b.Meta().MinTime < globalMaxt { metrics.OverlappingBlocks.Inc() overlapping = true - level.Info(logger).Log("msg", "Found overlapping blocks during compaction") + logger.Info("Found overlapping blocks during compaction") } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime @@ -1319,7 +1318,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM } // Returns opened blocks, and blocks that should be closed (also returned in case of error). -func openBlocksForCompaction(dirs []string, open []*Block, logger log.Logger, pool chunkenc.Pool, concurrency int) (blocks, blocksToClose []*Block, _ error) { +func openBlocksForCompaction(dirs []string, open []*Block, logger *slog.Logger, pool chunkenc.Pool, concurrency int) (blocks, blocksToClose []*Block, _ error) { blocks = make([]*Block, 0, len(dirs)) blocksToClose = make([]*Block, 0, len(dirs)) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 9f40a2c4999..7fb4cac931c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "math" "math/rand" "os" @@ -29,10 +30,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -274,7 +274,7 @@ type Options struct { BlockChunkQuerierFunc BlockChunkQuerierFunc } -type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) +type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} @@ -288,7 +288,7 @@ type DB struct { dir string locker *tsdbutil.DirLocker - logger log.Logger + logger *slog.Logger metrics *dbMetrics opts *Options chunkPool chunkenc.Pool @@ -479,7 +479,7 @@ var ErrClosed = errors.New("db already closed") // Current implementation doesn't support concurrency so // all API calls should happen in the same go routine. type DBReadOnly struct { - logger log.Logger + logger *slog.Logger dir string sandboxDir string closers []io.Closer @@ -487,7 +487,7 @@ type DBReadOnly struct { } // OpenDBReadOnly opens DB in the given directory for read only operations. 
-func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) { +func OpenDBReadOnly(dir, sandboxDirRoot string, l *slog.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { return nil, fmt.Errorf("opening the db dir: %w", err) } @@ -501,7 +501,7 @@ func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, erro } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &DBReadOnly{ @@ -700,7 +700,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { if len(corrupted) > 0 { for _, b := range loadable { if err := b.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b) + db.logger.Warn("Closing block failed", "err", err, "block", b) } } errs := tsdb_errors.NewMulti() @@ -732,7 +732,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during opening", "detail", overlaps.String()) } // Close all previously open readers and add the new ones to the cache. @@ -810,7 +810,7 @@ func (db *DBReadOnly) Close() error { defer func() { // Delete the temporary sandbox directory that was created when opening the DB. if err := os.RemoveAll(db.sandboxDir); err != nil { - level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err) + db.logger.Error("delete sandbox dir", "err", err) } }() select { @@ -824,7 +824,7 @@ func (db *DBReadOnly) Close() error { } // Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used. -func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { +func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { var rngs []int64 opts, rngs = validateOpts(opts, nil) @@ -877,12 +877,12 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { // open returns a new DB in the given directory. // It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database. // It is not safe to open more than one DB in the same directory. 
-func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { +func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if stats == nil { stats = NewDBStats() @@ -1067,17 +1067,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.head.metrics.walCorruptionsTotal.Inc() var e *errLoadWbl if errors.As(initErr, &e) { - level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { return nil, fmt.Errorf("repair corrupted WBL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WBL") + db.logger.Info("Successfully repaired WBL") } else { - level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WAL") + db.logger.Info("Successfully repaired WAL") } } @@ -1095,7 +1095,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return db, nil } -func removeBestEffortTmpDirs(l log.Logger, dir string) error { +func removeBestEffortTmpDirs(l *slog.Logger, dir string) error { files, err := os.ReadDir(dir) if os.IsNotExist(err) { return nil @@ -1106,10 +1106,10 @@ func removeBestEffortTmpDirs(l log.Logger, dir string) error { for _, f := range files { if isTmpDir(f) { if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil { - level.Error(l).Log("msg", "failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) + l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) continue } - level.Info(l).Log("msg", "Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) + l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) } } return nil @@ -1147,7 +1147,7 @@ func (db *DB) run(ctx context.Context) { case <-time.After(1 * time.Minute): db.cmtx.Lock() if err := db.reloadBlocks(); err != nil { - level.Error(db.logger).Log("msg", "reloadBlocks", "err", err) + db.logger.Error("reloadBlocks", "err", err) } db.cmtx.Unlock() @@ -1163,7 +1163,7 @@ func (db *DB) run(ctx context.Context) { db.autoCompactMtx.Lock() if db.autoCompact { if err := db.Compact(ctx); err != nil { - level.Error(db.logger).Log("msg", "compaction failed", "err", err) + db.logger.Error("compaction failed", "err", err) backoff = exponential(backoff, 1*time.Second, 1*time.Minute) } else { backoff = 0 @@ -1377,8 +1377,8 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { compactionDuration := time.Since(start) if compactionDuration.Milliseconds() > db.head.chunkRange.Load() { - level.Warn(db.logger).Log( - "msg", "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", + db.logger.Warn( + "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", "duration", compactionDuration.String(), "block_range", 
db.head.chunkRange.Load(), ) @@ -1503,26 +1503,46 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID meta := &BlockMeta{} meta.Compaction.SetOutOfOrder() - for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize { - mint, maxt := t, t+blockSize + runCompaction := func(mint, maxt int64) error { // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. uids, err := db.compactor.Write(dest, oooHead.CloneForTimeRange(mint, maxt-1), mint, maxt, meta) if err != nil { - return nil, err + return err } ulids = append(ulids, uids...) + return nil + } + + day := 24 * time.Hour.Milliseconds() + maxtFor24hBlock := day * (db.Head().MaxTime() / day) + + // 24h blocks for data that is for the previous days + for t := day * (oooHeadMint / day); t < maxtFor24hBlock; t += day { + if err := runCompaction(t, t+day); err != nil { + return nil, err + } + } + + oooStart := oooHeadMint + if oooStart < maxtFor24hBlock { + oooStart = maxtFor24hBlock + } + for t := blockSize * (oooStart / blockSize); t <= oooHeadMaxt; t += blockSize { + if err := runCompaction(t, t+blockSize); err != nil { + return nil, err + } } if len(ulids) == 0 { - level.Info(db.logger).Log( - "msg", "compact ooo head resulted in no blocks", + db.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil } - level.Info(db.logger).Log( - "msg", "out-of-order compaction completed", + db.logger.Info( + "out-of-order compaction completed", "duration", time.Since(start), "ulids", fmt.Sprintf("%v", ulids), ) @@ -1567,7 +1587,7 @@ func (db *DB) compactBlocks() (err error) { // long enough that we end up with a HEAD block that needs to be written. // Check if that's the case and stop compactions early. if db.head.compactable() && !db.waitingForCompactionDelay() { - level.Warn(db.logger).Log("msg", "aborting block compactions to persit the head block") + db.logger.Warn("aborting block compactions to persit the head block") return nil } @@ -1663,7 +1683,7 @@ func (db *DB) reloadBlocks() (err error) { for _, b := range block.Meta().Compaction.Parents { if _, ok := corrupted[b.ULID]; ok { delete(corrupted, b.ULID) - level.Warn(db.logger).Log("msg", "Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID) + db.logger.Warn("Found corrupted block, but replaced by compacted one so it's safe to delete. 
This should not happen with atomic deletes.", "block", b.ULID) } deletable[b.ULID] = nil } @@ -1725,7 +1745,7 @@ func (db *DB) reloadBlocks() (err error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) } } @@ -1741,7 +1761,7 @@ func (db *DB) reloadBlocks() (err error) { return nil } -func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { +func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { return nil, nil, fmt.Errorf("find blocks: %w", err) @@ -1751,7 +1771,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po for _, bDir := range bDirs { meta, _, err := readMetaFile(bDir) if err != nil { - level.Error(l).Log("msg", "Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) + l.Error("Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) continue } @@ -1873,7 +1893,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { for ulid, block := range blocks { if block != nil { if err := block.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", ulid) + db.logger.Warn("Closing block failed", "err", err, "block", ulid) } } @@ -1894,7 +1914,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { if err := os.RemoveAll(tmpToDelete); err != nil { return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } - level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) + db.logger.Info("Deleting obsolete block", "block", ulid) } return nil @@ -2062,7 +2082,7 @@ func (db *DB) DisableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = false - level.Info(db.logger).Log("msg", "Compactions disabled") + db.logger.Info("Compactions disabled") } // EnableCompactions enables auto compactions. 
@@ -2071,7 +2091,7 @@ func (db *DB) EnableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = true - level.Info(db.logger).Log("msg", "Compactions enabled") + db.logger.Info("Compactions enabled") } func (db *DB) generateCompactionDelay() time.Duration { @@ -2101,7 +2121,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { defer db.mtx.RUnlock() for _, b := range db.blocks { - level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) + db.logger.Info("Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) @@ -2372,7 +2392,7 @@ func (db *DB) CleanTombstones() (err error) { for _, uid := range uids { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { - level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) + db.logger.Error("failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go index 88fdd30c850..cc7d0990f6a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go @@ -20,7 +20,6 @@ import ( "hash" "hash/crc32" "math" - "unsafe" "github.com/dennwc/varint" ) @@ -75,8 +74,7 @@ func (e *Encbuf) PutVarint64(x int64) { // PutUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!). func (e *Encbuf) PutUvarintStr(s string) { - b := *(*[]byte)(unsafe.Pointer(&s)) - e.PutUvarint(len(b)) + e.PutUvarint(len(s)) e.PutString(s) } @@ -201,8 +199,9 @@ func (d *Decbuf) UvarintStr() string { return string(d.UvarintBytes()) } -// UvarintBytes returns invalid values if the byte slice goes away. -// Compared to UvarintStr, it avoid allocations. +// UvarintBytes returns a pointer to internal data; +// the return value becomes invalid if the byte slice goes away. +// Compared to UvarintStr, this avoids allocations. func (d *Decbuf) UvarintBytes() []byte { l := d.Uvarint64() if d.E != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go index 8f39377de0c..31d461bed9e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go @@ -29,7 +29,7 @@ import ( ) const ( - // Indicates that there is no index entry for an exmplar. + // Indicates that there is no index entry for an exemplar. noExemplar = -1 // Estimated number of exemplars per series, for sizing the index. 
estimatedExemplarsPerSeries = 16 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 9f175b446d3..324b0a60607 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "path/filepath" "runtime" @@ -25,10 +26,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "github.com/prometheus/prometheus/config" @@ -83,7 +83,7 @@ type Head struct { wal, wbl *wlog.WL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage - logger log.Logger + logger *slog.Logger appendPool zeropool.Pool[[]record.RefSample] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] histogramsPool zeropool.Pool[[]record.RefHistogramSample] @@ -157,10 +157,6 @@ type HeadOptions struct { // OutOfOrderTimeWindow is > 0 EnableOOONativeHistograms atomic.Bool - // EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample. - // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md - EnableCreatedTimestampZeroIngestion bool - ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string @@ -248,10 +244,10 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. -func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if opts.OutOfOrderTimeWindow.Load() < 0 { @@ -597,7 +593,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }, func() float64 { val, err := h.chunkDiskMapper.Size() if err != nil { - level.Error(h.logger).Log("msg", "Failed to calculate size of \"chunks_head\" dir", + h.logger.Error("Failed to calculate size of \"chunks_head\" dir", "err", err.Error()) } return float64(val) @@ -660,7 +656,7 @@ func (h *Head) Init(minValidTime int64) error { } }() - level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any") + h.logger.Info("Replaying on-disk memory mappable chunks if any") start := time.Now() snapIdx, snapOffset := -1, 0 @@ -669,7 +665,7 @@ func (h *Head) Init(minValidTime int64) error { snapshotLoaded := false var chunkSnapshotLoadDuration time.Duration if h.opts.EnableMemorySnapshotOnShutdown { - level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") + h.logger.Info("Chunk snapshot is enabled, replaying from the snapshot") // If there are any WAL files, there should be at least one WAL file with an index that is current or newer // than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed // to be outdated. 
@@ -682,14 +678,14 @@ func (h *Head) Init(minValidTime int64) error { _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil && !errors.Is(err, record.ErrNotFound) { - level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) + h.logger.Error("Could not find last snapshot", "err", err) } if err == nil && endAt < idx { loadSnapshot = false - level.Warn(h.logger).Log("msg", "Last WAL file is behind snapshot, removing snapshots") + h.logger.Warn("Last WAL file is behind snapshot, removing snapshots") if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil { - level.Error(h.logger).Log("msg", "Error while deleting snapshot directories", "err", err) + h.logger.Error("Error while deleting snapshot directories", "err", err) } } } @@ -699,14 +695,14 @@ func (h *Head) Init(minValidTime int64) error { if err == nil { snapshotLoaded = true chunkSnapshotLoadDuration = time.Since(start) - level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) + h.logger.Info("Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) } if err != nil { snapIdx, snapOffset = -1, 0 refSeries = make(map[chunks.HeadSeriesRef]*memSeries) h.metrics.snapshotReplayErrorTotal.Inc() - level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err) + h.logger.Error("Failed to load chunk snapshot", "err", err) // We clear the partially loaded data to replay fresh from the WAL. if err := h.resetInMemoryState(); err != nil { return err @@ -730,7 +726,7 @@ func (h *Head) Init(minValidTime int64) error { mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries) if err != nil { // TODO(codesome): clear out all m-map chunks here for refSeries. - level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) + h.logger.Error("Loading on-disk chunks failed", "err", err) var cerr *chunks.CorruptionErr if errors.As(err, &cerr) { h.metrics.mmapChunkCorruptionTotal.Inc() @@ -747,15 +743,15 @@ func (h *Head) Init(minValidTime int64) error { } } mmapChunkReplayDuration = time.Since(mmapChunkReplayStart) - level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) + h.logger.Info("On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) } if h.wal == nil { - level.Info(h.logger).Log("msg", "WAL not found") + h.logger.Info("WAL not found") return nil } - level.Info(h.logger).Log("msg", "Replaying WAL, this may take a while") + h.logger.Info("Replaying WAL, this may take a while") checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. 
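The `Head.Init` hunks above gate chunk-snapshot replay on the WAL: if the newest WAL segment index is behind the snapshot index, the snapshot is treated as outdated and deleted, and the head falls back to a full WAL replay. A tiny sketch of that decision (illustrative helper, not the vendored code):

```go
package main

import "fmt"

// shouldLoadSnapshot reports whether a chunk snapshot may be replayed:
// it requires a WAL segment at least as new as the snapshot's index.
func shouldLoadSnapshot(lastWALSegment, snapshotIdx int, snapshotFound bool) bool {
	return snapshotFound && lastWALSegment >= snapshotIdx
}

func main() {
	fmt.Println(shouldLoadSnapshot(12, 10, true)) // true: the WAL is ahead of the snapshot
	fmt.Println(shouldLoadSnapshot(8, 10, true))  // false: the WAL fell behind, so the snapshot is discarded
}
```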
@@ -781,7 +777,7 @@ func (h *Head) Init(minValidTime int64) error { } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } }() @@ -792,7 +788,7 @@ func (h *Head) Init(minValidTime int64) error { } h.updateWALReplayStatusRead(startFrom) startFrom++ - level.Info(h.logger).Log("msg", "WAL checkpoint loaded") + h.logger.Info("WAL checkpoint loaded") } checkpointReplayDuration := time.Since(checkpointReplayStart) @@ -822,12 +818,12 @@ func (h *Head) Init(minValidTime int64) error { } err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } if err != nil { return err } - level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } walReplayDuration := time.Since(walReplayStart) @@ -850,12 +846,12 @@ func (h *Head) Init(minValidTime int64) error { sr := wlog.NewSegmentBufReader(s) err = h.loadWBL(wlog.NewReader(sr), syms, multiRef, lastMmapRef) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) + h.logger.Warn("Error while closing the wbl segments reader", "err", err) } if err != nil { return &errLoadWbl{err} } - level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WBL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } } @@ -864,8 +860,8 @@ func (h *Head) Init(minValidTime int64) error { totalReplayDuration := time.Since(start) h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds()) - level.Info(h.logger).Log( - "msg", "WAL replay completed", + h.logger.Info( + "WAL replay completed", "checkpoint_replay_duration", checkpointReplayDuration.String(), "wal_replay_duration", walReplayDuration.String(), "wbl_replay_duration", wblReplayDuration.String(), @@ -975,28 +971,28 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously // loaded mmapped chunks. func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") // We never want to preserve the in-memory series from snapshots if we are repairing m-map chunks. 
if err := h.resetInMemoryState(); err != nil { return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, err } - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil { - level.Info(h.logger).Log("msg", "Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) + h.logger.Info("Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed", "err", err) } return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, nil } - level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") + h.logger.Info("Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") mmappedChunks, oooMmappedChunks, lastRef, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) if err != nil { - level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) + h.logger.Error("Loading on-disk chunks failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed after failed loading", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed after failed loading", "err", err) } mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } @@ -1031,7 +1027,7 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) { } migrated := h.exemplars.(*CircularExemplarStorage).Resize(newSize) - level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) + h.logger.Info("Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) } // SetOutOfOrderTimeWindow updates the out of order related parameters. @@ -1342,7 +1338,7 @@ func (h *Head) truncateWAL(mint int64) error { // If truncating fails, we'll just try again at the next checkpoint. // Leftover segments will just be ignored in the future if there's a checkpoint // that supersedes them. - level.Error(h.logger).Log("msg", "truncating segments failed", "err", err) + h.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it is truncated, so we no @@ -1360,12 +1356,12 @@ func (h *Head) truncateWAL(mint int64) error { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher checkpoint exists. 
- level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err) + h.logger.Error("delete old checkpoints", "err", err) h.metrics.checkpointDeleteFail.Inc() } h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) - level.Info(h.logger).Log("msg", "WAL checkpoint complete", + h.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil @@ -1403,7 +1399,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { start := time.Now() headMaxt := h.MaxTime() actualMint, minOOOTime, minMmapFile := h.gc() - level.Info(h.logger).Log("msg", "Head GC completed", "caller", caller, "duration", time.Since(start)) + h.logger.Info("Head GC completed", "caller", caller, "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) if actualMint > h.minTime.Load() { @@ -1555,7 +1551,7 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match series := h.series.getByID(chunks.HeadSeriesRef(p.At())) if series == nil { - level.Debug(h.logger).Log("msg", "Series not found in Head.Delete") + h.logger.Debug("Series not found in Head.Delete") continue } @@ -2156,7 +2152,7 @@ type memSeries struct { // before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5 // after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7 // - // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N + // pN is the pointer to the mmappedChunk referred to by HeadChunkID=N mmappedChunks []*mmappedChunk // Most recent chunks in memory that are still being built or waiting to be mmapped. // This is a linked list, headChunks points to the most recent chunk, headChunks.next points diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index 6a308abb14b..b545e97ae33 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -17,11 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -42,6 +40,12 @@ type initAppender struct { var _ storage.GetRef = &initAppender{} +func (a *initAppender) SetOptions(opts *storage.AppendOptions) { + if a.app != nil { + a.app.SetOptions(opts) + } +} + func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if a.app != nil { return a.app.Append(ref, lset, t, v) @@ -79,6 +83,16 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t return a.app.AppendHistogram(ref, l, t, h, fh) } +func (a *initAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + } + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) +} + func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { if a.app != nil { return a.app.UpdateMetadata(ref, l, m) @@ -318,6 +332,11 @@ type headAppender struct { appendID, cleanupAppendIDsBelow uint64 closed bool + hints *storage.AppendOptions +} + 
+func (a *headAppender) SetOptions(opts *storage.AppendOptions) { + a.hints = opts } func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { @@ -351,13 +370,18 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } s.Lock() + + defer s.Unlock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. - _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) + isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err == nil { + if isOOO && a.hints != nil && a.hints.DiscardOutOfOrder { + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() + return 0, storage.ErrOutOfOrderSample + } s.pendingCommit = true } - s.Unlock() if delta > 0 { a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) } @@ -392,7 +416,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { if ct >= t { - return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + return 0, storage.ErrCTNewerThanSample } s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) @@ -450,9 +474,10 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo return s, created, nil } -// appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) -// The sample belongs to the out of order chunk if we return true and no error. -// An error signifies the sample cannot be handled. +// appendable checks whether the given sample is valid for appending to the series. +// If the sample is valid and in-order, it returns false with no error. +// If the sample belongs to the out-of-order chunk, it returns true with no error. +// If the sample cannot be handled, it returns an error. func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { @@ -743,6 +768,102 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return storage.SeriesRef(s.ref), nil } +func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if !a.head.opts.EnableNativeHistograms.Load() { + return 0, storage.ErrNativeHistogramsDisabled + } + + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + var created bool + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + var err error + s, created, err = a.getOrCreate(lset) + if err != nil { + return 0, err + } + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. 
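The new `SetOptions` hook above lets a caller opt into rejecting out-of-order floats outright instead of routing them to the OOO head. A caller-side sketch, assuming `SetOptions` is exposed on the `storage.Appender` returned by `Head.Appender` and that it honours `storage.AppendOptions.DiscardOutOfOrder` as in the `Append` change above; the label set, timestamps, and function name are illustrative:

```go
package appendersketch

import (
	"context"
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// appendStrictlyInOrder rejects any sample older than the series head instead
// of buffering it as an out-of-order sample.
func appendStrictlyInOrder(ctx context.Context, head *tsdb.Head, t int64, v float64) error {
	app := head.Appender(ctx)
	app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})

	lbls := labels.FromStrings("__name__", "demo_metric", "job", "demo")
	if _, err := app.Append(0, lbls, t, v); err != nil {
		_ = app.Rollback()
		if errors.Is(err, storage.ErrOutOfOrderSample) {
			return fmt.Errorf("sample at %d discarded as out-of-order: %w", t, err)
		}
		return err
	}
	return app.Commit()
}
```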
+ if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastHistogramValue = zeroHistogram + } + + // Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.histograms = append(a.histograms, record.RefHistogramSample{ + Ref: s.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, s) + case fh != nil: + zeroFloatHistogram := &histogram.FloatHistogram{} + s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. + if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastFloatHistogramValue = zeroFloatHistogram + } + + // Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples. + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ + Ref: s.ref, + T: ct, + FH: zeroFloatHistogram, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, s) + } + + if ct > a.maxt { + a.maxt = ct + } + return storage.SeriesRef(s.ref), nil +} + // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { @@ -863,23 +984,38 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { return ret } -// Commit writes to the WAL and adds the data to the Head. -// TODO(codesome): Refactor this method to reduce indentation and make it more readable. -func (a *headAppender) Commit() (err error) { - if a.closed { - return ErrAppenderClosed - } - defer func() { a.closed = true }() - - if err := a.log(); err != nil { - _ = a.Rollback() // Most likely the same error will happen again. 
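`AppendCTZeroSample` and the new `AppendHistogramCTZeroSample` both inject the created timestamp as a synthetic zero sample, and both refuse a `ct` that is not strictly older than the sample (`ErrCTNewerThanSample`) or that would land out of order (`ErrOutOfOrderCT`). A caller-side sketch for the float case, assuming the `storage.Appender` returned by `Head.Appender` exposes the `CreatedTimestampAppender` method, as the `headAppender` above does; metric name, timestamps, and function name are illustrative:

```go
package ctsketch

import (
	"context"
	"errors"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

// appendCounterWithCT writes the created timestamp ct as a zero sample and then
// the real sample value v at time t (ct must be < t).
func appendCounterWithCT(ctx context.Context, head *tsdb.Head, t, ct int64, v float64) error {
	app := head.Appender(ctx)
	lbls := labels.FromStrings("__name__", "http_requests_total", "job", "demo")

	ref, err := app.AppendCTZeroSample(0, lbls, t, ct)
	switch {
	case err == nil, errors.Is(err, storage.ErrOutOfOrderCT), errors.Is(err, storage.ErrCTNewerThanSample):
		// An unusable ct is dropped, but the real sample can still be appended.
	default:
		_ = app.Rollback()
		return err
	}

	if _, err := app.Append(ref, lbls, t, v); err != nil {
		_ = app.Rollback()
		return err
	}
	return app.Commit()
}
```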
- return fmt.Errorf("write to WAL: %w", err) - } - - if a.head.writeNotified != nil { - a.head.writeNotified.Notify() - } +type appenderCommitContext struct { + floatsAppended int + histogramsAppended int + // Number of samples out of order but accepted: with ooo enabled and within time window. + oooFloatsAccepted int + oooHistogramAccepted int + // Number of samples rejected due to: out of order but OOO support disabled. + floatOOORejected int + histoOOORejected int + // Number of samples rejected due to: out of order but too old (OOO support enabled, but outside time window). + floatTooOldRejected int + histoTooOldRejected int + // Number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled). + floatOOBRejected int + histoOOBRejected int + inOrderMint int64 + inOrderMaxt int64 + oooMinT int64 + oooMaxT int64 + wblSamples []record.RefSample + wblHistograms []record.RefHistogramSample + wblFloatHistograms []record.RefFloatHistogramSample + oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef + oooMmapMarkersCount int + oooRecords [][]byte + oooCapMax int64 + appendChunkOpts chunkOpts + enc record.Encoder +} +// commitExemplars adds all exemplars from headAppender to the head's exemplar storage. +func (a *headAppender) commitExemplars() { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) @@ -894,128 +1030,117 @@ func (a *headAppender) Commit() (err error) { if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } - level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) + a.head.logger.Debug("Unknown error while adding exemplar", "err", err) } } +} - defer a.head.metrics.activeAppenders.Dec() - defer a.head.putAppendBuffer(a.samples) - defer a.head.putSeriesBuffer(a.sampleSeries) - defer a.head.putExemplarBuffer(a.exemplars) - defer a.head.putHistogramBuffer(a.histograms) - defer a.head.putFloatHistogramBuffer(a.floatHistograms) - defer a.head.putMetadataBuffer(a.metadata) - defer a.head.iso.closeAppend(a.appendID) +func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { + if a.head.wbl == nil { + // WBL is not enabled. So no need to collect. + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil + acc.oooMmapMarkersCount = 0 + return + } - var ( - floatsAppended = len(a.samples) - histogramsAppended = len(a.histograms) + len(a.floatHistograms) - // number of samples out of order but accepted: with ooo enabled and within time window - oooFloatsAccepted int - oooHistogramAccepted int - // number of samples rejected due to: out of order but OOO support disabled. 
- floatOOORejected int - histoOOORejected int - // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside time window) - floatTooOldRejected int - histoTooOldRejected int - // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) - floatOOBRejected int - histoOOBRejected int - inOrderMint int64 = math.MaxInt64 - inOrderMaxt int64 = math.MinInt64 - oooMinT int64 = math.MaxInt64 - oooMaxT int64 = math.MinInt64 - wblSamples []record.RefSample - wblHistograms []record.RefHistogramSample - wblFloatHistograms []record.RefFloatHistogramSample - oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef - oooMmapMarkersCount int - oooRecords [][]byte - oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - series *memSeries - appendChunkOpts = chunkOpts{ - chunkDiskMapper: a.head.chunkDiskMapper, - chunkRange: a.head.chunkRange.Load(), - samplesPerChunk: a.head.opts.SamplesPerChunk, - } - enc record.Encoder - ) - defer func() { - for i := range oooRecords { - a.head.putBytesBuffer(oooRecords[i][:0]) - } - }() - collectOOORecords := func() { - if a.head.wbl == nil { - // WBL is not enabled. So no need to collect. - wblSamples = nil - wblHistograms = nil - wblFloatHistograms = nil - oooMmapMarkers = nil - oooMmapMarkersCount = 0 - return - } - // The m-map happens before adding a new sample. So we collect - // the m-map markers first, and then samples. - // WBL Graphically: - // WBL Before this Commit(): [old samples before this commit for chunk 1] - // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] - if oooMmapMarkers != nil { - markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount) - for ref, mmapRefs := range oooMmapMarkers { - for _, mmapRef := range mmapRefs { - markers = append(markers, record.RefMmapMarker{ - Ref: ref, - MmapRef: mmapRef, - }) - } + // The m-map happens before adding a new sample. So we collect + // the m-map markers first, and then samples. 
+ // WBL Graphically: + // WBL Before this Commit(): [old samples before this commit for chunk 1] + // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] + if acc.oooMmapMarkers != nil { + markers := make([]record.RefMmapMarker, 0, acc.oooMmapMarkersCount) + for ref, mmapRefs := range acc.oooMmapMarkers { + for _, mmapRef := range mmapRefs { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) } - r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) } + r := acc.enc.MmapMarkers(markers, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } - if len(wblSamples) > 0 { - r := enc.Samples(wblSamples, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblHistograms) > 0 { - r := enc.HistogramSamples(wblHistograms, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblFloatHistograms) > 0 { - r := enc.FloatHistogramSamples(wblFloatHistograms, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } + if len(acc.wblSamples) > 0 { + r := acc.enc.Samples(acc.wblSamples, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblHistograms) > 0 { + r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblFloatHistograms) > 0 { + r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil +} - wblSamples = nil - wblHistograms = nil - wblFloatHistograms = nil - oooMmapMarkers = nil +// handleAppendableError processes errors encountered during sample appending and updates +// the provided counters accordingly. +// +// Parameters: +// - err: The error encountered during appending. +// - appended: Pointer to the counter tracking the number of successfully appended samples. +// - oooRejected: Pointer to the counter tracking the number of out-of-order samples rejected. +// - oobRejected: Pointer to the counter tracking the number of out-of-bounds samples rejected. +// - tooOldRejected: Pointer to the counter tracking the number of too-old samples rejected. +func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOldRejected *int) { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + *appended-- + *oooRejected++ + case errors.Is(err, storage.ErrOutOfBounds): + *appended-- + *oobRejected++ + case errors.Is(err, storage.ErrTooOldSample): + *appended-- + *tooOldRejected++ + default: + *appended-- } +} + +// commitSamples processes and commits the samples in the headAppender to the series. +// It handles both in-order and out-of-order samples, updating the appenderCommitContext +// with the results of the append operations. +// +// The function iterates over the samples in the headAppender and attempts to append each sample +// to its corresponding series. It handles various error cases such as out-of-order samples, +// out-of-bounds samples, and too-old samples, updating the appenderCommitContext accordingly. +// +// For out-of-order samples, it checks if the sample can be inserted into the series and updates +// the out-of-order mmap markers if necessary. 
It also updates the write-ahead log (WBL) samples +// and the minimum and maximum timestamps for out-of-order samples. +// +// For in-order samples, it attempts to append the sample to the series and updates the minimum +// and maximum timestamps for in-order samples. +// +// The function also increments the chunk metrics if a new chunk is created and performs cleanup +// operations on the series after appending the samples. +// +// There are also specific functions to commit histograms and float histograms. +func (a *headAppender) commitSamples(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries + for i, s := range a.samples { series = a.sampleSeries[i] series.Lock() oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - floatsAppended-- - floatOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - floatsAppended-- - floatOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - floatsAppended-- - floatTooOldRejected++ - default: - floatsAppended-- + if err != nil { + handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -1023,9 +1148,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -1033,49 +1158,49 @@ func (a *headAppender) Commit() (err error) { // r != nil means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. - collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblSamples = append(wblSamples, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblSamples = append(acc.wblSamples, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooFloatsAccepted++ + acc.oooFloatsAccepted++ } else { // Sample is an exact duplicate of the last sample. 
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - floatsAppended-- + acc.floatsAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { // The sample is an exact duplicate, and should be silently dropped. - floatsAppended-- + acc.floatsAppended-- } } @@ -1088,30 +1213,22 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} + +// For details on the commitHistograms function, see the commitSamples docs. +func (a *headAppender) commitHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - histogramsAppended-- - histoOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - histogramsAppended-- - histoOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - histogramsAppended-- - histoTooOldRejected++ - default: - histogramsAppended-- + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -1119,9 +1236,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -1129,49 +1246,49 @@ func (a *headAppender) Commit() (err error) { // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. - collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. 
- oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblHistograms = append(wblHistograms, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblHistograms = append(acc.wblHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooHistogramAccepted++ + acc.oooHistogramAccepted++ } else { // Sample is an exact duplicate of the last sample. // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - histogramsAppended-- + acc.histogramsAppended-- } default: - ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts) + ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { - histogramsAppended-- - histoOOORejected++ + acc.histogramsAppended-- + acc.histoOOORejected++ } } @@ -1184,30 +1301,22 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} + +// For details on the commitFloatHistograms function, see the commitSamples docs. +func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries for i, s := range a.floatHistograms { series = a.floatHistogramSeries[i] series.Lock() oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - histogramsAppended-- - histoOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - histogramsAppended-- - histoOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - histogramsAppended-- - histoTooOldRejected++ - default: - histogramsAppended-- + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -1215,9 +1324,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -1225,49 +1334,49 @@ func (a *headAppender) Commit() (err error) { // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. 
- collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblFloatHistograms = append(wblFloatHistograms, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblFloatHistograms = append(acc.wblFloatHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooHistogramAccepted++ + acc.oooHistogramAccepted++ } else { // Sample is an exact duplicate of the last sample. // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - histogramsAppended-- + acc.histogramsAppended-- } default: - ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts) + ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { - histogramsAppended-- - histoOOORejected++ + acc.histogramsAppended-- + acc.histoOOORejected++ } } @@ -1280,40 +1389,102 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} +// commitMetadata commits the metadata for each series in the headAppender. +// It iterates over the metadata slice and updates the corresponding series +// with the new metadata information. The series is locked during the update +// to ensure thread safety. 
+func (a *headAppender) commitMetadata() { + var series *memSeries for i, m := range a.metadata { series = a.metadataSeries[i] series.Lock() series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} series.Unlock() } +} - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOORejected)) - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected)) - a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOBRejected)) - a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(oooHistogramAccepted)) - a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) - a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) +// Commit writes to the WAL and adds the data to the Head. +// TODO(codesome): Refactor this method to reduce indentation and make it more readable. +func (a *headAppender) Commit() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() + + if err := a.log(); err != nil { + _ = a.Rollback() // Most likely the same error will happen again. + return fmt.Errorf("write to WAL: %w", err) + } + + if a.head.writeNotified != nil { + a.head.writeNotified.Notify() + } + + a.commitExemplars() + + defer a.head.metrics.activeAppenders.Dec() + defer a.head.putAppendBuffer(a.samples) + defer a.head.putSeriesBuffer(a.sampleSeries) + defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.putHistogramBuffer(a.histograms) + defer a.head.putFloatHistogramBuffer(a.floatHistograms) + defer a.head.putMetadataBuffer(a.metadata) + defer a.head.iso.closeAppend(a.appendID) + + acc := &appenderCommitContext{ + floatsAppended: len(a.samples), + histogramsAppended: len(a.histograms) + len(a.floatHistograms), + inOrderMint: math.MaxInt64, + inOrderMaxt: math.MinInt64, + oooMinT: math.MaxInt64, + oooMaxT: math.MinInt64, + oooCapMax: a.head.opts.OutOfOrderCapMax.Load(), + appendChunkOpts: chunkOpts{ + chunkDiskMapper: a.head.chunkDiskMapper, + chunkRange: a.head.chunkRange.Load(), + samplesPerChunk: a.head.opts.SamplesPerChunk, + }, + } + + defer func() { + for i := range acc.oooRecords { + a.head.putBytesBuffer(acc.oooRecords[i][:0]) + } + }() - collectOOORecords() + a.commitSamples(acc) + a.commitHistograms(acc) + a.commitFloatHistograms(acc) + a.commitMetadata() + + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) + 
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) + a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) + + acc.collectOOORecords(a) if a.head.wbl != nil { - if err := a.head.wbl.Log(oooRecords...); err != nil { + if err := a.head.wbl.Log(acc.oooRecords...); err != nil { // TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging // until we have found what samples become OOO. We can try having a metric for this failure. // Returning the error here is not correct because we have already put the samples into the memory, // hence the append/insert was a success. - level.Error(a.head.logger).Log("msg", "Failed to log out of order samples into the WAL", "err", err) + a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err) } } return nil } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger *slog.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } @@ -1702,7 +1873,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. // The caller must ensure that s is locked and s.ooo is not nil. -func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger) s.ooo.oooHeadChunk = &oooHeadChunk{ @@ -1715,7 +1886,7 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk } // s must be locked when calling. -func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef { +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { // OOO is not enabled or there is no head chunk, so nothing to m-map here. 
return nil @@ -1728,7 +1899,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, len(chks)) for _, memchunk := range chks { if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) { - level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String()) + logger.Error("Too many OOO chunks, dropping data", "series", s.lset.String()) break } chunkRef := chunkDiskMapper.WriteChunk(s.ref, memchunk.minTime, memchunk.maxTime, memchunk.chunk, true, handleChunkWriteError) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go b/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go index a16d9072612..a75f3372245 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go @@ -16,8 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -31,8 +30,8 @@ func (s *memSeries) labels() labels.Labels { // RebuildSymbolTable goes through all the series in h, build a SymbolTable with all names and values, // replace each series' Labels with one using that SymbolTable. -func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { - level.Info(logger).Log("msg", "RebuildSymbolTable starting") +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { + logger.Info("RebuildSymbolTable starting") st := labels.NewSymbolTable() builder := labels.NewScratchBuilderWithSymbolTable(st, 0) rebuildLabels := func(lbls labels.Labels) labels.Labels { @@ -66,7 +65,7 @@ func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { if e, ok := h.exemplars.(withReset); ok { e.ResetSymbolTable(st) } - level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", st.Len()) + logger.Info("RebuildSymbolTable finished", "size", st.Len()) return st } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go index fea91530dc7..c73872c12e1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go @@ -16,7 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. 
-func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index 31af2b759ea..a3cd7b653d1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -21,8 +21,6 @@ import ( "slices" "sync" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -140,7 +138,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") } else { series = append(series, s) } @@ -173,7 +171,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") continue } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 674d7080123..6729d770901 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -24,7 +24,6 @@ import ( "sync" "time" - "github.com/go-kit/log/level" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +127,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // replaying the WAL, so lets just log the error if it's not that type. 
err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { - level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) + h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err) } } }(exemplarsInput) @@ -421,8 +420,8 @@ Outer: } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { - level.Warn(h.logger).Log( - "msg", "Unknown series references", + h.logger.Warn( + "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "histograms", unknownHistogramRefs.Load(), @@ -430,7 +429,7 @@ Outer: ) } if count := mmapOverlappingChunks.Load(); count > 0 { - level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count) + h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count) } return nil } @@ -446,8 +445,8 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m mmc[0].minTime, mmc[len(mmc)-1].maxTime, ) { - level.Debug(h.logger).Log( - "msg", "M-mapped chunks overlap on a duplicate series record", + h.logger.Debug( + "M-mapped chunks overlap on a duplicate series record", "series", mSeries.labels().String(), "oldref", mSeries.ref, "oldmint", mSeries.mmappedChunks[0].minTime, @@ -913,7 +912,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) + h.logger.Warn("Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) } return nil } @@ -1214,7 +1213,7 @@ const chunkSnapshotPrefix = "chunk_snapshot." func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if h.wal == nil { // If we are not storing any WAL, does not make sense to take a snapshot too. - level.Warn(h.logger).Log("msg", "skipping chunk snapshotting as WAL is disabled") + h.logger.Warn("skipping chunk snapshotting as WAL is disabled") return &ChunkSnapshotStats{}, nil } h.chunkSnapshotMtx.Lock() @@ -1363,7 +1362,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Leftover old chunk snapshots do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher chunk snapshot exists. 
- level.Error(h.logger).Log("msg", "delete old chunk snapshots", "err", err) + h.logger.Error("delete old chunk snapshots", "err", err) } return stats, nil } @@ -1373,12 +1372,12 @@ func chunkSnapshotDir(wlast, woffset int) string { } func (h *Head) performChunkSnapshot() error { - level.Info(h.logger).Log("msg", "creating chunk snapshot") + h.logger.Info("creating chunk snapshot") startTime := time.Now() stats, err := h.ChunkSnapshot() elapsed := time.Since(startTime) if err == nil { - level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) + h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } if err != nil { return fmt.Errorf("chunk snapshot: %w", err) @@ -1493,7 +1492,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err) + h.logger.Warn("error while closing the wal segments reader", "err", err) } }() @@ -1682,9 +1681,9 @@ Outer: } elapsed := time.Since(start) - level.Info(h.logger).Log("msg", "chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) + h.logger.Info("chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) if unknownRefs > 0 { - level.Warn(h.logger).Log("msg", "unknown series references during chunk snapshot replay", "count", unknownRefs) + h.logger.Warn("unknown series references during chunk snapshot replay", "count", unknownRefs) } return snapIdx, snapOffset, refSeries, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 5f407d73da7..48ee98c9ff9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -439,7 +439,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... return err } if labels.Compare(lset, w.lastSeries) <= 0 { - return fmt.Errorf("out-of-order series added with label set %q", lset) + return fmt.Errorf("out-of-order series added with label set %q, last label set %q", lset, w.lastSeries) } if ref < w.lastSeriesRef && !w.lastSeries.IsEmpty() { @@ -2125,5 +2125,5 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index a9871325130..8b55954d0a1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -430,15 +430,17 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, // Now `vals` only contains the values that matched, get their postings. its := make([]Postings, 0, len(vals)) + lps := make([]ListPostings, len(vals)) p.mtx.RLock() e := p.m[name] - for _, v := range vals { + for i, v := range vals { if refs, ok := e[v]; ok { // Some of the values may have been garbage-collected in the meantime this is fine, we'll just skip them. 
// If we didn't let the mutex go, we'd have these postings here, but they would be pointing nowhere // because there would be a `MemPostings.Delete()` call waiting for the lock to delete these labels, // because the series were deleted already. - its = append(its, NewListPostings(refs)) + lps[i] = ListPostings{list: refs} + its = append(its, &lps[i]) } } // Let the mutex go before merging. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 64c7fac749c..44a542e3766 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -253,6 +253,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, ms ...*lab return nil, err } its = append(its, allPostings) + case m.Type == labels.MatchRegexp && m.Value == ".*": + // .* regexp matches any string: do nothing. + case m.Type == labels.MatchNotRegexp && m.Value == ".*": + return index.EmptyPostings(), nil case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 9d2c5738d17..8bdc645b5e3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -17,19 +17,17 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" ) // repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in // commit 129773b41a565fde5156301e37f9a87158030443. -func repairBadIndexVersion(logger log.Logger, dir string) error { +func repairBadIndexVersion(logger *slog.Logger, dir string) error { // All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected. // We must actually set the index file version to 2 and revert the meta.json version back to 1. 
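The `PostingsForMatchers` change above short-circuits the two degenerate regexp matchers: `=~".*"` matches any string, so it adds no constraint, while `!~".*"` can never match, so the result is empty postings. A small sketch of the matcher values those branches recognise; the label name is illustrative:

```go
package matchersketch

import "github.com/prometheus/prometheus/model/labels"

// degenerateMatchers returns the two matcher shapes the new fast path handles.
func degenerateMatchers() (matchAll, matchNone *labels.Matcher) {
	// {job=~".*"}: matches any value, so PostingsForMatchers skips it entirely.
	matchAll = labels.MustNewMatcher(labels.MatchRegexp, "job", ".*")
	// {job!~".*"}: can never match, so PostingsForMatchers returns empty postings.
	matchNone = labels.MustNewMatcher(labels.MatchNotRegexp, "job", ".*")
	return matchAll, matchNone
}
```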
dirs, err := blockDirs(dir) @@ -41,7 +39,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { defer func() { for _, tmp := range tmpFiles { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } } }() @@ -49,20 +47,20 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { for _, d := range dirs { meta, err := readBogusMetaFile(d) if err != nil { - level.Error(logger).Log("msg", "failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) + logger.Error("failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) continue } if meta.Version == metaVersion1 { - level.Info(logger).Log( - "msg", "Found healthy block", + logger.Info( + "Found healthy block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, ) continue } - level.Info(logger).Log( - "msg", "Fixing broken block", + logger.Info( + "Fixing broken block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go index 4cea5005dbc..dcba298f3bb 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go @@ -19,15 +19,13 @@ import ( "fmt" "hash" "hash/crc32" + "log/slog" "math" "os" "path/filepath" "sort" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -76,7 +74,7 @@ type Reader interface { Close() error } -func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { +func WriteFile(logger *slog.Logger, dir string, tr Reader) (int64, error) { path := filepath.Join(dir, TombstonesFilename) tmp := path + ".tmp" hash := newCRC32() @@ -89,11 +87,11 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { defer func() { if f != nil { if err := f.Close(); err != nil { - level.Error(logger).Log("msg", "close tmp file", "err", err.Error()) + logger.Error("close tmp file", "err", err.Error()) } } if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go index f7b27c2e08e..b49757223f1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go @@ -16,10 +16,9 @@ package tsdb import ( "context" "fmt" + "log/slog" "path/filepath" - "github.com/go-kit/log" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -27,7 +26,7 @@ import ( var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") // CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk. 
-func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger log.Logger) (string, error) { +func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) { if chunkRange == 0 { chunkRange = DefaultBlockDuration } @@ -41,7 +40,7 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l } defer func() { if err := w.Close(); err != nil { - logger.Log("err closing blockwriter", err.Error()) + logger.Error("err closing blockwriter", "err", err.Error()) } }() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go index fa939879cad..4b69e1f9d61 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go @@ -16,11 +16,10 @@ package tsdbutil import ( "errors" "fmt" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -34,7 +33,7 @@ const ( ) type DirLocker struct { - logger log.Logger + logger *slog.Logger createdCleanly prometheus.Gauge @@ -43,7 +42,7 @@ type DirLocker struct { } // NewDirLocker creates a DirLocker that can obtain an exclusive lock on dir. -func NewDirLocker(dir, subsystem string, l log.Logger, r prometheus.Registerer) (*DirLocker, error) { +func NewDirLocker(dir, subsystem string, l *slog.Logger, r prometheus.Registerer) (*DirLocker, error) { lock := &DirLocker{ logger: l, createdCleanly: prometheus.NewGauge(prometheus.GaugeOpts{ @@ -74,7 +73,7 @@ func (l *DirLocker) Lock() error { } if _, err := os.Stat(l.path); err == nil { - level.Warn(l.logger).Log("msg", "A lockfile from a previous execution already existed. It was replaced", "file", l.path) + l.logger.Warn("A lockfile from a previous execution already existed. 
It was replaced", "file", l.path) l.createdCleanly.Set(lockfileReplaced) } else { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go index a4cf5abd68c..7228dbafed6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go @@ -18,8 +18,8 @@ import ( "os" "testing" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/util/testutil" @@ -68,7 +68,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat // Test preconditions (file already exists + lockfile option) if c.fileAlreadyExists { - tmpLocker, err := NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + tmpLocker, err := NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil) require.NoError(t, err) err = os.WriteFile(tmpLocker.path, []byte{}, 0o644) require.NoError(t, err) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index a16cd5fc749..58e11c770e0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -25,9 +26,6 @@ import ( "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -94,11 +92,11 @@ const checkpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. -func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser - level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) + logger.Info("Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) { var sgmRange []SegmentRange diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go index 6eaef5f3960..a017d362d15 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go @@ -20,9 +20,8 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" @@ -51,7 +50,7 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { } // NewLiveReader returns a new live reader. 
-func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { +func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. zstdReader, _ := zstd.NewReader(nil) @@ -73,7 +72,7 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) * // that are still in the process of being written, and returns records as soon // as they can be read. type LiveReader struct { - logger log.Logger + logger *slog.Logger rdr io.Reader err error rec []byte @@ -311,7 +310,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize) } r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc() - level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) + r.logger.Warn("Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) } if recordHeaderSize+length > pageSize { return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index ac5041e87b9..d68ef2accb8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -24,9 +25,8 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -84,7 +84,7 @@ type WatcherMetrics struct { type Watcher struct { name string writer WriteTo - logger log.Logger + logger *slog.Logger walDir string lastCheckpoint string sendExemplars bool @@ -172,9 +172,9 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } // NewWatcher creates a new WAL watcher for a given WriteTo. -func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { +func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger *slog.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Watcher{ logger: logger, @@ -222,7 +222,7 @@ func (w *Watcher) setMetrics() { // Start the Watcher. 
func (w *Watcher) Start() { w.setMetrics() - level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name) + w.logger.Info("Starting WAL watcher", "queue", w.name) go w.loop() } @@ -241,7 +241,7 @@ func (w *Watcher) Stop() { w.metrics.currentSegment.DeleteLabelValues(w.name) } - level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name) + w.logger.Info("WAL watcher stopped", "queue", w.name) } func (w *Watcher) loop() { @@ -251,7 +251,7 @@ func (w *Watcher) loop() { for !isClosed(w.quit) { w.SetStartTime(time.Now()) if err := w.Run(); err != nil { - level.Error(w.logger).Log("msg", "error tailing WAL", "err", err) + w.logger.Error("error tailing WAL", "err", err) } select { @@ -274,7 +274,7 @@ func (w *Watcher) Run() error { // Run will be called again if there was a failure to read the WAL. w.sendSamples = false - level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name) + w.logger.Info("Replaying WAL", "queue", w.name) // Backfill from the checkpoint first if it exists. lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) @@ -294,13 +294,13 @@ func (w *Watcher) Run() error { return err } - level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) + w.logger.Debug("Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) for !isClosed(w.quit) { w.currentSegmentMetric.Set(float64(currentSegment)) // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment. // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment. - level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment) + w.logger.Debug("Processing segment", "currentSegment", currentSegment) if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } @@ -338,9 +338,9 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { if err != nil && !errors.Is(err, io.EOF) { - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) + w.logger.Warn("Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) + w.logger.Warn("Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) } return ErrIgnorable } @@ -403,7 +403,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { <-gcSem }() if err := w.garbageCollectSeries(segmentNum); err != nil { - level.Warn(w.logger).Log("msg", "Error process checkpoint", "err", err) + w.logger.Warn("Error process checkpoint", "err", err) } }() default: @@ -424,7 +424,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: - level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) + w.logger.Debug("Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) err := w.readAndHandleError(reader, segmentNum, tail, size) if err != nil { return err @@ -460,11 +460,11 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { } if index >= segmentNum { - level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) + w.logger.Debug("Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) return nil } - level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) + w.logger.Debug("New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { return fmt.Errorf("readCheckpoint: %w", err) @@ -519,7 +519,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } samplesToSend = append(samplesToSend, s) } @@ -564,7 +564,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } histogramsToSend = append(histogramsToSend, h) } @@ -592,7 +592,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } floatHistogramsToSend = append(floatHistogramsToSend, fh) } @@ -670,7 +670,7 @@ type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) er // Read all the series records from a Checkpoint directory. 
func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error { - level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) + w.logger.Debug("Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { return fmt.Errorf("checkpointNum: %w", err) @@ -704,7 +704,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } } - level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir) + w.logger.Debug("Read series references from checkpoint", "checkpoint", checkpointDir) return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index b14521f358f..54c257d61a4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -21,6 +21,7 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" "os" "path/filepath" "slices" @@ -28,11 +29,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -121,7 +121,7 @@ func (e *CorruptionErr) Unwrap() error { } // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. -func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { +func OpenWriteSegment(logger *slog.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { @@ -138,7 +138,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { - level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) + logger.Warn("Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, fmt.Errorf("zero-pad torn page: %w", err) @@ -201,7 +201,7 @@ func ParseCompressionType(compress bool, compressType string) CompressionType { // beyond the most recent segment. type WL struct { dir string - logger log.Logger + logger *slog.Logger segmentSize int mtx sync.RWMutex segment *Segment // Active segment. @@ -286,7 +286,7 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { }, func() float64 { val, err := w.Size() if err != nil { - level.Error(w.logger).Log("msg", "Failed to calculate size of \"wal\" dir", + w.logger.Error("Failed to calculate size of \"wal\" dir", "err", err.Error()) } return float64(val) @@ -309,13 +309,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { } // New returns a new WAL over the given directory. -func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { +func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. // New segments are created with the specified size. 
-func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { +func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -323,7 +323,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi return nil, fmt.Errorf("create dir: %w", err) } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var zstdWriter *zstd.Encoder @@ -378,9 +378,9 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } // Open an existing WAL. -func Open(logger log.Logger, dir string) (*WL, error) { +func Open(logger *slog.Logger, dir string) (*WL, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } zstdWriter, err := zstd.NewWriter(nil) if err != nil { @@ -443,7 +443,7 @@ func (w *WL) Repair(origErr error) error { if cerr.Segment < 0 { return errors.New("corruption error does not specify position") } - level.Warn(w.logger).Log("msg", "Starting corruption repair", + w.logger.Warn("Starting corruption repair", "segment", cerr.Segment, "offset", cerr.Offset) // All segments behind the corruption can no longer be used. @@ -451,7 +451,7 @@ func (w *WL) Repair(origErr error) error { if err != nil { return fmt.Errorf("list segments: %w", err) } - level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Deleting all segments newer than corrupted segment", "segment", cerr.Segment) for _, s := range segs { if w.segment.i == s.index { @@ -473,7 +473,7 @@ func (w *WL) Repair(origErr error) error { // Regardless of the corruption offset, no record reaches into the previous segment. // So we can safely repair the WAL by removing the segment and re-inserting all // its records up to the corruption. - level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Rewrite corrupted segment", "segment", cerr.Segment) fn := SegmentName(w.Dir(), cerr.Segment) tmpfn := fn + ".repair" @@ -583,10 +583,10 @@ func (w *WL) nextSegment(async bool) (int, error) { // Don't block further writes by fsyncing the last segment. 
f := func() { if err := w.fsync(prev); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := prev.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } } if async { @@ -890,10 +890,10 @@ func (w *WL) Close() (err error) { <-donec if err = w.fsync(w.segment); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := w.segment.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } w.metrics.Unregister() diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index b0272b7fee0..1b743f70576 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -146,6 +146,8 @@ var ( PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) + IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) + HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) ) type annoErr struct { @@ -273,3 +275,21 @@ func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange. Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName), } } + +// NewIncompatibleTypesInBinOpInfo is used if binary operators act on a +// combination of types that doesn't work and therefore returns no result. +func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType), + } +} + +// NewHistogramIgnoredInAggregationInfo is used when a histogram is ignored by +// an aggregation operator that cannot handle histograms. +func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation), + } +} diff --git a/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go b/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go new file mode 100644 index 00000000000..5e08422aa06 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go @@ -0,0 +1,173 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
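A short sketch of how a query engine might emit the two new annotations added above; annotations.New and posrange.PositionRange are existing Prometheus APIs, and the operator, types, and positions used here are placeholder values:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	annos := annotations.New()
	pos := posrange.PositionRange{Start: 0, End: 10}

	annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("stddev", pos))
	annos.Add(annotations.NewIncompatibleTypesInBinOpInfo("float", "+", "histogram", pos))

	for _, err := range *annos {
		fmt.Println(err)
	}
}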
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convertnhcb + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" +) + +// TempHistogram is used to collect information about classic histogram +// samples incrementally before creating a histogram.Histogram or +// histogram.FloatHistogram based on the values collected. +type TempHistogram struct { + BucketCounts map[float64]float64 + Count float64 + Sum float64 + HasFloat bool +} + +// NewTempHistogram creates a new TempHistogram to +// collect information about classic histogram samples. +func NewTempHistogram() TempHistogram { + return TempHistogram{ + BucketCounts: map[float64]float64{}, + } +} + +func (h TempHistogram) getIntBucketCounts() (map[float64]int64, error) { + bucketCounts := map[float64]int64{} + for le, count := range h.BucketCounts { + intCount := int64(math.Round(count)) + if float64(intCount) != count { + return nil, fmt.Errorf("bucket count %f for le %g is not an integer", count, le) + } + bucketCounts[le] = intCount + } + return bucketCounts, nil +} + +// ProcessUpperBoundsAndCreateBaseHistogram prepares an integer native +// histogram with custom buckets based on the provided upper bounds. +// Everything is set except the bucket counts. +// The sorted upper bounds are also returned. +func ProcessUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64, needsDedup bool) ([]float64, *histogram.Histogram) { + sort.Float64s(upperBounds0) + var upperBounds []float64 + if needsDedup { + upperBounds = make([]float64, 0, len(upperBounds0)) + prevLE := math.Inf(-1) + for _, le := range upperBounds0 { + if le != prevLE { + upperBounds = append(upperBounds, le) + prevLE = le + } + } + } else { + upperBounds = upperBounds0 + } + var customBounds []float64 + if upperBounds[len(upperBounds)-1] == math.Inf(1) { + customBounds = upperBounds[:len(upperBounds)-1] + } else { + customBounds = upperBounds + } + return upperBounds, &histogram.Histogram{ + Count: 0, + Sum: 0, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: uint32(len(upperBounds))}, + }, + PositiveBuckets: make([]int64, len(upperBounds)), + CustomValues: customBounds, + } +} + +// NewHistogram fills the bucket counts in the provided histogram.Histogram +// or histogram.FloatHistogram based on the provided temporary histogram and +// upper bounds. 
+func NewHistogram(histogram TempHistogram, upperBounds []float64, hBase *histogram.Histogram, fhBase *histogram.FloatHistogram) (*histogram.Histogram, *histogram.FloatHistogram) { + intBucketCounts, err := histogram.getIntBucketCounts() + if err != nil { + return nil, newFloatHistogram(histogram, upperBounds, histogram.BucketCounts, fhBase) + } + return newIntegerHistogram(histogram, upperBounds, intBucketCounts, hBase), nil +} + +func newIntegerHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]int64, hBase *histogram.Histogram) *histogram.Histogram { + h := hBase.Copy() + absBucketCounts := make([]int64, len(h.PositiveBuckets)) + var prevCount, total int64 + for i, le := range upperBounds { + currCount, exists := bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + absBucketCounts[i] = count + total += count + prevCount = currCount + } + h.PositiveBuckets[0] = absBucketCounts[0] + for i := 1; i < len(h.PositiveBuckets); i++ { + h.PositiveBuckets[i] = absBucketCounts[i] - absBucketCounts[i-1] + } + h.Sum = histogram.Sum + if histogram.Count != 0 { + total = int64(histogram.Count) + } + h.Count = uint64(total) + return h.Compact(0) +} + +func newFloatHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]float64, fhBase *histogram.FloatHistogram) *histogram.FloatHistogram { + fh := fhBase.Copy() + var prevCount, total float64 + for i, le := range upperBounds { + currCount, exists := bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + fh.PositiveBuckets[i] = count + total += count + prevCount = currCount + } + fh.Sum = histogram.Sum + if histogram.Count != 0 { + total = histogram.Count + } + fh.Count = total + return fh.Compact(0) +} + +func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels { + mName := m.Get(labels.MetricName) + return labels.NewBuilder(m). + Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). + Del(labels.BucketLabel). + Labels() +} + +// GetHistogramMetricBaseName removes the suffixes _bucket, _sum, _count from +// the metric name. We specifically do not remove the _created suffix as that +// should be removed by the caller. +func GetHistogramMetricBaseName(s string) string { + if r, ok := strings.CutSuffix(s, "_bucket"); ok { + return r + } + if r, ok := strings.CutSuffix(s, "_sum"); ok { + return r + } + if r, ok := strings.CutSuffix(s, "_count"); ok { + return r + } + return s +} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go index d490a6afdf1..d5aee5c095c 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go @@ -14,12 +14,10 @@ package logging import ( - "bytes" + "context" + "log/slog" "sync" "time" - - "github.com/go-kit/log" - "github.com/go-logfmt/logfmt" ) const ( @@ -28,22 +26,9 @@ const ( maxEntries = 1024 ) -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -// Deduper implement log.Logger, dedupes log lines. +// Deduper implements *slog.Handler, dedupes log lines based on a time duration. 
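A sketch of how the convertnhcb helpers above fit together to build a native histogram with custom buckets from classic-histogram data; the bucket boundaries and counts are made-up sample values:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/util/convertnhcb"
)

func main() {
	// Cumulative classic-histogram buckets, as they would be scraped.
	tmp := convertnhcb.NewTempHistogram()
	tmp.BucketCounts[0.1] = 2
	tmp.BucketCounts[0.5] = 5
	tmp.BucketCounts[math.Inf(1)] = 7
	tmp.Count = 7
	tmp.Sum = 1.35

	upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(
		[]float64{0.1, 0.5, math.Inf(1)}, true)
	fhBase := hBase.ToFloat(nil)

	// Integer counts yield a *histogram.Histogram; non-integer counts fall
	// back to the float variant.
	h, fh := convertnhcb.NewHistogram(tmp, upperBounds, hBase, fhBase)
	if h != nil {
		fmt.Println(h.String())
	} else {
		fmt.Println(fh.String())
	}
}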
type Deduper struct { - next log.Logger + next *slog.Logger repeat time.Duration quit chan struct{} mtx sync.RWMutex @@ -51,7 +36,7 @@ type Deduper struct { } // Dedupe log lines to next, only repeating every repeat duration. -func Dedupe(next log.Logger, repeat time.Duration) *Deduper { +func Dedupe(next *slog.Logger, repeat time.Duration) *Deduper { d := &Deduper{ next: next, repeat: repeat, @@ -62,6 +47,63 @@ func Dedupe(next log.Logger, repeat time.Duration) *Deduper { return d } +// Enabled returns true if the Deduper's internal slog.Logger is enabled at the +// provided context and log level, and returns false otherwise. It implements +// slog.Handler. +func (d *Deduper) Enabled(ctx context.Context, level slog.Level) bool { + return d.next.Enabled(ctx, level) +} + +// Handle uses the provided context and slog.Record to deduplicate messages +// every 1m. Log records received within the interval are not acted on, and +// thus dropped. Log records that pass deduplication and need action invoke the +// Handle() method on the Deduper's internal slog.Logger's handler, effectively +// chaining log calls to the internal slog.Logger. +func (d *Deduper) Handle(ctx context.Context, r slog.Record) error { + line := r.Message + d.mtx.RLock() + last, ok := d.seen[line] + d.mtx.RUnlock() + + if ok && time.Since(last) < d.repeat { + return nil + } + + d.mtx.Lock() + if len(d.seen) < maxEntries { + d.seen[line] = time.Now() + } + d.mtx.Unlock() + + return d.next.Handler().Handle(ctx, r.Clone()) +} + +// WithAttrs adds the provided attributes to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { + return &Deduper{ + next: slog.New(d.next.Handler().WithAttrs(attrs)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } +} + +// WithGroup adds the provided group name to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithGroup(name string) slog.Handler { + if name == "" { + return d + } + + return &Deduper{ + next: slog.New(d.next.Handler().WithGroup(name)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } +} + // Stop the Deduper. func (d *Deduper) Stop() { close(d.quit) @@ -87,44 +129,3 @@ func (d *Deduper) run() { } } } - -// Log implements log.Logger. -func (d *Deduper) Log(keyvals ...interface{}) error { - line, err := encode(keyvals...) - if err != nil { - return err - } - - d.mtx.RLock() - last, ok := d.seen[line] - d.mtx.RUnlock() - - if ok && time.Since(last) < d.repeat { - return nil - } - - d.mtx.Lock() - if len(d.seen) < maxEntries { - d.seen[line] = time.Now() - } - d.mtx.Unlock() - - return d.next.Log(keyvals...) 
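After this rewrite, Deduper is wrapped as an slog.Handler around another slog.Logger instead of being used as a go-kit logger; a minimal sketch, with an arbitrary one-minute repeat window:

package main

import (
	"log/slog"
	"time"

	"github.com/prometheus/common/promslog"
	"github.com/prometheus/prometheus/util/logging"
)

func main() {
	d := logging.Dedupe(promslog.New(&promslog.Config{}), 1*time.Minute)
	defer d.Stop()

	logger := slog.New(d)
	for i := 0; i < 3; i++ {
		// Deduplication is keyed on the message text, so only the first of
		// these identical messages is forwarded within the window.
		logger.Warn("remote write failed", "endpoint", "http://example/api/v1/write")
	}
}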
-} - -func encode(keyvals ...interface{}) (string, error) { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.buf.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return "", err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return "", err - } - - return enc.buf.String(), nil -} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go index 2afa828547f..f20927bedaf 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/file.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go @@ -15,20 +15,15 @@ package logging import ( "fmt" + "log/slog" "os" - "time" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" ) -var timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", -) - -// JSONFileLogger represents a logger that writes JSON to a file. +// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface. type JSONFileLogger struct { - logger log.Logger + logger *slog.Logger file *os.File } @@ -40,21 +35,48 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { - return nil, fmt.Errorf("can't create json logger: %w", err) + return nil, fmt.Errorf("can't create json log file: %w", err) } + jsonFmt := &promslog.AllowedFormat{} + _ = jsonFmt.Set("json") return &JSONFileLogger{ - logger: log.With(log.NewJSONLogger(f), "ts", timestampFormat), + logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}), file: f, }, nil } -// Close closes the underlying file. +// Close closes the underlying file. It implements the promql.QueryLogger interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } -// Log calls the Log function of the underlying logger. -func (l *JSONFileLogger) Log(i ...interface{}) error { - return l.logger.Log(i...) +// With calls the `With()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) With(args ...any) { + l.logger = l.logger.With(args...) +} + +// Info calls the `Info()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Info(msg string, args ...any) { + l.logger.Info(msg, args...) +} + +// Error calls the `Error()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Error(msg string, args ...any) { + l.logger.Error(msg, args...) +} + +// Debug calls the `Debug()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Debug(msg string, args ...any) { + l.logger.Debug(msg, args...) +} + +// Warn calls the `Warn()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Warn(msg string, args ...any) { + l.logger.Warn(msg, args...) 
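The JSONFileLogger now exposes leveled methods so it keeps satisfying promql.QueryLogger on top of slog; a short usage sketch with an arbitrary log file path:

package main

import "github.com/prometheus/prometheus/util/logging"

func main() {
	ql, err := logging.NewJSONFileLogger("/tmp/query.log")
	if err != nil {
		panic(err)
	}
	defer ql.Close()

	ql.With("component", "query engine")
	ql.Info("promql query logged", "expr", "up", "duration_seconds", 0.002)
}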
} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go b/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go deleted file mode 100644 index 32d1e249e68..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logging - -import ( - "github.com/go-kit/log" - "golang.org/x/time/rate" -) - -type ratelimiter struct { - limiter *rate.Limiter - next log.Logger -} - -// RateLimit write to a logger. -func RateLimit(next log.Logger, limit rate.Limit) log.Logger { - return &ratelimiter{ - limiter: rate.NewLimiter(limit, int(limit)), - next: next, - } -} - -func (r *ratelimiter) Log(keyvals ...interface{}) error { - if r.limiter.Allow() { - return r.next.Log(keyvals...) - } - return nil -} diff --git a/vendor/github.com/prometheus/prometheus/util/notifications/notifications.go b/vendor/github.com/prometheus/prometheus/util/notifications/notifications.go new file mode 100644 index 00000000000..4888a0b6641 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/notifications/notifications.go @@ -0,0 +1,185 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifications + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + ConfigurationUnsuccessful = "Configuration reload has failed." + StartingUp = "Prometheus is starting and replaying the write-ahead log (WAL)." + ShuttingDown = "Prometheus is shutting down and gracefully stopping all operations." +) + +// Notification represents an individual notification message. +type Notification struct { + Text string `json:"text"` + Date time.Time `json:"date"` + Active bool `json:"active"` +} + +// Notifications stores a list of Notification objects. +// It also manages live subscribers that receive notifications via channels. +type Notifications struct { + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + maxSubscribers int + + subscriberGauge prometheus.Gauge + notificationsSent prometheus.Counter + notificationsDropped prometheus.Counter +} + +// NewNotifications creates a new Notifications instance. 
+func NewNotifications(maxSubscribers int, reg prometheus.Registerer) *Notifications { + n := &Notifications{ + subscribers: make(map[chan Notification]struct{}), + maxSubscribers: maxSubscribers, + subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_active_subscribers", + Help: "The current number of active notification subscribers.", + }), + notificationsSent: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_sent_total", + Help: "Total number of notification updates sent.", + }), + notificationsDropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_dropped_total", + Help: "Total number of notification updates dropped.", + }), + } + + if reg != nil { + reg.MustRegister(n.subscriberGauge, n.notificationsSent, n.notificationsDropped) + } + + return n +} + +// AddNotification adds a new notification or updates the timestamp if it already exists. +func (n *Notifications) AddNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + for i, notification := range n.notifications { + if notification.Text == text { + n.notifications[i].Date = time.Now() + + n.notifySubscribers(n.notifications[i]) + return + } + } + + newNotification := Notification{ + Text: text, + Date: time.Now(), + Active: true, + } + n.notifications = append(n.notifications, newNotification) + + n.notifySubscribers(newNotification) +} + +// notifySubscribers sends a notification to all active subscribers. +func (n *Notifications) notifySubscribers(notification Notification) { + for sub := range n.subscribers { + // Non-blocking send to avoid subscriber blocking issues. + n.notificationsSent.Inc() + select { + case sub <- notification: + // Notification sent to the subscriber. + default: + // Drop the notification if the subscriber's channel is full. + n.notificationsDropped.Inc() + } + } +} + +// DeleteNotification removes the first notification that matches the provided text. +// The deleted notification is sent to subscribers with Active: false before being removed. +func (n *Notifications) DeleteNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + // Iterate through the notifications to find the matching text. + for i, notification := range n.notifications { + if notification.Text == text { + // Mark the notification as inactive and notify subscribers. + notification.Active = false + n.notifySubscribers(notification) + + // Remove the notification from the list. + n.notifications = append(n.notifications[:i], n.notifications[i+1:]...) + return + } + } +} + +// Get returns a copy of the list of notifications for safe access outside the struct. +func (n *Notifications) Get() []Notification { + n.mu.Lock() + defer n.mu.Unlock() + + // Return a copy of the notifications slice to avoid modifying the original slice outside. + notificationsCopy := make([]Notification, len(n.notifications)) + copy(notificationsCopy, n.notifications) + return notificationsCopy +} + +// Sub allows a client to subscribe to live notifications. +// It returns a channel where the subscriber will receive notifications and a function to unsubscribe. +// Each subscriber has its own goroutine to handle notifications and prevent blocking. 
+func (n *Notifications) Sub() (<-chan Notification, func(), bool) { + n.mu.Lock() + defer n.mu.Unlock() + + if len(n.subscribers) >= n.maxSubscribers { + return nil, nil, false + } + + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. + + // Add the new subscriber to the list. + n.subscribers[ch] = struct{}{} + n.subscriberGauge.Set(float64(len(n.subscribers))) + + // Send all current notifications to the new subscriber. + for _, notification := range n.notifications { + ch <- notification + } + + // Unsubscribe function to remove the channel from subscribers. + unsubscribe := func() { + n.mu.Lock() + defer n.mu.Unlock() + + // Close the channel and remove it from the subscribers map. + close(ch) + delete(n.subscribers, ch) + n.subscriberGauge.Set(float64(len(n.subscribers))) + } + + return ch, unsubscribe, true +} diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go index 7d1f9dda242..e15d591e0c7 100644 --- a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go +++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go @@ -30,15 +30,15 @@ import ( // New returns a new TestStorage for testing purposes // that removes all associated files on closing. -func New(t testutil.T) *TestStorage { - stor, err := NewWithError() +func New(t testutil.T, outOfOrderTimeWindow ...int64) *TestStorage { + stor, err := NewWithError(outOfOrderTimeWindow...) require.NoError(t, err) return stor } // NewWithError returns a new TestStorage for user facing tests, which reports // errors directly. -func NewWithError() (*TestStorage, error) { +func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) { dir, err := os.MkdirTemp("", "test_storage") if err != nil { return nil, fmt.Errorf("opening test directory: %w", err) @@ -51,6 +51,14 @@ func NewWithError() (*TestStorage, error) { opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond) opts.RetentionDuration = 0 opts.EnableNativeHistograms = true + + // Set OutOfOrderTimeWindow if provided, otherwise use default (0) + if len(outOfOrderTimeWindow) > 0 { + opts.OutOfOrderTimeWindow = outOfOrderTimeWindow[0] + } else { + opts.OutOfOrderTimeWindow = 0 // Default value is zero + } + db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats()) if err != nil { return nil, fmt.Errorf("opening test storage: %w", err) diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go deleted file mode 100644 index db096ea2342..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package testutil - -import ( - "testing" - - "github.com/go-kit/log" -) - -type logger struct { - t *testing.T -} - -// NewLogger returns a gokit compatible Logger which calls t.Log. 
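A sketch of the intended flow for the new notifications package: a reader subscribes via Sub, and lifecycle code publishes with AddNotification/DeleteNotification; the subscriber limit of 16 is an arbitrary value:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/util/notifications"
)

func main() {
	n := notifications.NewNotifications(16, prometheus.NewRegistry())

	ch, unsubscribe, ok := n.Sub()
	if !ok {
		panic("too many subscribers")
	}
	defer unsubscribe()

	n.AddNotification(notifications.StartingUp)
	fmt.Println((<-ch).Text) // the StartingUp message, marked active

	n.DeleteNotification(notifications.StartingUp)
	fmt.Println((<-ch).Active) // false: the deletion is broadcast with Active set to false
}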
-func NewLogger(t *testing.T) log.Logger { - return logger{t: t} -} - -// Log implements log.Logger. -func (t logger) Log(keyvals ...interface{}) error { - t.t.Log(keyvals...) - return nil -} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/port.go b/vendor/github.com/prometheus/prometheus/util/testutil/port.go index 1e449b123d3..7cf4cf1ccc9 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/port.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/port.go @@ -15,21 +15,56 @@ package testutil import ( "net" + "sync" "testing" ) +var ( + mu sync.Mutex + usedPorts []int +) + // RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. func RandomUnprivilegedPort(t *testing.T) int { t.Helper() + mu.Lock() + defer mu.Unlock() + + port, err := getPort() + if err != nil { + t.Fatal(err) + } + + for portWasUsed(port) { + port, err = getPort() + if err != nil { + t.Fatal(err) + } + } + + usedPorts = append(usedPorts, port) + + return port +} + +func portWasUsed(port int) bool { + for _, usedPort := range usedPorts { + if port == usedPort { + return true + } + } + return false +} +func getPort() (int, error) { listener, err := net.Listen("tcp", ":0") if err != nil { - t.Fatalf("Listening on random port: %v", err) + return 0, err } if err := listener.Close(); err != nil { - t.Fatalf("Closing listener: %v", err) + return 0, err } - return listener.Addr().(*net.TCPAddr).Port + return listener.Addr().(*net.TCPAddr).Port, nil } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index ff700d835f1..63ffe627604 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -15,8 +15,12 @@ package v1 import ( "context" + "crypto/sha1" + "encoding/hex" + "encoding/json" "errors" "fmt" + "log/slog" "math" "math/rand" "net" @@ -30,8 +34,6 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" jsoniter "github.com/json-iterator/go" "github.com/munnerz/goautoneg" @@ -53,6 +55,7 @@ import ( "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" + "github.com/prometheus/prometheus/util/notifications" "github.com/prometheus/prometheus/util/stats" ) @@ -202,16 +205,18 @@ type API struct { ready func(http.HandlerFunc) http.HandlerFunc globalURLOptions GlobalURLOptions - db TSDBAdminStats - dbDir string - enableAdmin bool - logger log.Logger - CORSOrigin *regexp.Regexp - buildInfo *PrometheusVersion - runtimeInfo func() (RuntimeInfo, error) - gatherer prometheus.Gatherer - isAgent bool - statsRenderer StatsRenderer + db TSDBAdminStats + dbDir string + enableAdmin bool + logger *slog.Logger + CORSOrigin *regexp.Regexp + buildInfo *PrometheusVersion + runtimeInfo func() (RuntimeInfo, error) + gatherer prometheus.Gatherer + isAgent bool + statsRenderer StatsRenderer + notificationsGetter func() []notifications.Notification + notificationsSub func() (<-chan notifications.Notification, func(), bool) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -236,7 +241,7 @@ func NewAPI( db TSDBAdminStats, dbDir string, enableAdmin bool, - logger log.Logger, + logger *slog.Logger, rr func(context.Context) RulesRetriever, remoteReadSampleLimit int, remoteReadConcurrencyLimit int, @@ -245,6 +250,8 @@ func NewAPI( corsOrigin 
*regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, + notificationsGetter func() []notifications.Notification, + notificationsSub func() (<-chan notifications.Notification, func(), bool), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, @@ -263,22 +270,24 @@ func NewAPI( targetRetriever: tr, alertmanagerRetriever: ar, - now: time.Now, - config: configFunc, - flagsMap: flagsMap, - ready: readyFunc, - globalURLOptions: globalURLOptions, - db: db, - dbDir: dbDir, - enableAdmin: enableAdmin, - rulesRetriever: rr, - logger: logger, - CORSOrigin: corsOrigin, - runtimeInfo: runtimeInfo, - buildInfo: buildInfo, - gatherer: gatherer, - isAgent: isAgent, - statsRenderer: DefaultStatsRenderer, + now: time.Now, + config: configFunc, + flagsMap: flagsMap, + ready: readyFunc, + globalURLOptions: globalURLOptions, + db: db, + dbDir: dbDir, + enableAdmin: enableAdmin, + rulesRetriever: rr, + logger: logger, + CORSOrigin: corsOrigin, + runtimeInfo: runtimeInfo, + buildInfo: buildInfo, + gatherer: gatherer, + isAgent: isAgent, + statsRenderer: DefaultStatsRenderer, + notificationsGetter: notificationsGetter, + notificationsSub: notificationsSub, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -368,6 +377,9 @@ func (api *API) Register(r *route.Router) { r.Get("/format_query", wrapAgent(api.formatQuery)) r.Post("/format_query", wrapAgent(api.formatQuery)) + r.Get("/parse_query", wrapAgent(api.parseQuery)) + r.Post("/parse_query", wrapAgent(api.parseQuery)) + r.Get("/labels", wrapAgent(api.labelNames)) r.Post("/labels", wrapAgent(api.labelNames)) r.Get("/label/:name/values", wrapAgent(api.labelValues)) @@ -389,6 +401,8 @@ func (api *API) Register(r *route.Router) { r.Get("/status/flags", wrap(api.serveFlags)) r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/walreplay", api.serveWALReplayStatus) + r.Get("/notifications", api.notifications) + r.Get("/notifications/live", api.notificationsSSE) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite)) @@ -487,6 +501,15 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } +func (api *API) parseQuery(r *http.Request) apiFuncResult { + expr, err := parser.ParseExpr(r.FormValue("query")) + if err != nil { + return invalidParamError(err, "query") + } + + return apiFuncResult{data: translateAST(expr), err: nil, warnings: nil, finalizer: nil} +} + func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { var duration time.Duration @@ -814,12 +837,22 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } var ( - // MinTime is the default timestamp used for the begin of optional time ranges. - // Exposed to let downstream projects to reference it. + // MinTime is the default timestamp used for the start of optional time ranges. + // Exposed to let downstream projects reference it. + // + // Historical note: This should just be time.Unix(math.MinInt64/1000, 0).UTC(), + // but it was set to a higher value in the past due to a misunderstanding. + // The value is still low enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. 
MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() // MaxTime is the default timestamp used for the end of optional time ranges. // Exposed to let downstream projects to reference it. + // + // Historical note: This should just be time.Unix(math.MaxInt64/1000, 0).UTC(), + // but it was set to a lower value in the past due to a misunderstanding. + // The value is still high enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() minTimeFormatted = MinTime.Format(time.RFC3339Nano) @@ -1342,7 +1375,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { // RuleDiscovery has info for all rules. type RuleDiscovery struct { - RuleGroups []*RuleGroup `json:"groups"` + RuleGroups []*RuleGroup `json:"groups"` + GroupNextToken string `json:"groupNextToken:omitempty"` } // RuleGroup has info for rules which are part of a group. @@ -1429,8 +1463,23 @@ func (api *API) rules(r *http.Request) apiFuncResult { return invalidParamError(err, "exclude_alerts") } + maxGroups, nextToken, parseErr := parseListRulesPaginationRequest(r) + if parseErr != nil { + return *parseErr + } + rgs := make([]*RuleGroup, 0, len(ruleGroups)) + + foundToken := false + for _, grp := range ruleGroups { + if maxGroups > 0 && nextToken != "" && !foundToken { + if nextToken != getRuleGroupNextToken(grp.File(), grp.Name()) { + continue + } + foundToken = true + } + if len(rgSet) > 0 { if _, ok := rgSet[grp.Name()]; !ok { continue @@ -1475,6 +1524,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { if !excludeAlerts { activeAlerts = rulesAlertsToAPIAlerts(rule.ActiveAlerts()) } + enrichedRule = AlertingRule{ State: rule.State().String(), Name: rule.Name(), @@ -1490,6 +1540,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { LastEvaluation: rule.GetEvaluationTimestamp(), Type: "alerting", } + case *rules.RecordingRule: if !returnRecording { break @@ -1516,9 +1567,20 @@ func (api *API) rules(r *http.Request) apiFuncResult { // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. if len(apiRuleGroup.Rules) > 0 { + if maxGroups > 0 && len(rgs) == int(maxGroups) { + // We've reached the capacity of our page plus one. That means that for sure there will be at least one + // rule group in a subsequent request. Therefore a next token is required. + res.GroupNextToken = getRuleGroupNextToken(grp.File(), grp.Name()) + break + } rgs = append(rgs, apiRuleGroup) } } + + if maxGroups > 0 && nextToken != "" && !foundToken { + return invalidParamError(fmt.Errorf("invalid group_next_token '%v'. 
were rule groups changed?", nextToken), "group_next_token") + } + res.RuleGroups = rgs return apiFuncResult{res, nil, nil, nil} } @@ -1537,6 +1599,44 @@ func parseExcludeAlerts(r *http.Request) (bool, error) { return excludeAlerts, nil } +func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncResult) { + var ( + parsedMaxGroups int64 = -1 + err error + ) + maxGroups := r.URL.Query().Get("group_limit") + nextToken := r.URL.Query().Get("group_next_token") + + if nextToken != "" && maxGroups == "" { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token") + return -1, "", &errResult + } + + if maxGroups != "" { + parsedMaxGroups, err = strconv.ParseInt(maxGroups, 10, 32) + if err != nil { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be a valid number: %w", err), "group_limit") + return -1, "", &errResult + } + if parsedMaxGroups <= 0 { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit") + return -1, "", &errResult + } + } + + if parsedMaxGroups > 0 { + return parsedMaxGroups, nextToken, nil + } + + return -1, "", nil +} + +func getRuleGroupNextToken(file, group string) string { + h := sha1.New() + h.Write([]byte(file + ";" + group)) + return hex.EncodeToString(h.Sum(nil)) +} + type prometheusConfig struct { YAML string `json:"yaml"` } @@ -1658,6 +1758,57 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { }, nil, "") } +func (api *API) notifications(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + api.respond(w, r, api.notificationsGetter(), nil, "") +} + +func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + + // Subscribe to notifications. + notifications, unsubscribe, ok := api.notificationsSub() + if !ok { + w.WriteHeader(http.StatusNoContent) + return + } + defer unsubscribe() + + // Set up a flusher to push the response to the client. + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "Streaming unsupported", http.StatusInternalServerError) + return + } + + // Flush the response to ensure the headers are immediately and eventSource + // onopen is triggered client-side. + flusher.Flush() + + for { + select { + case notification := <-notifications: + // Marshal the notification to JSON. + jsonData, err := json.Marshal(notification) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + continue + } + + // Write the event data in SSE format with JSON content. + fmt.Fprintf(w, "data: %s\n\n", jsonData) + + // Flush the response to ensure the data is sent immediately. + flusher.Flush() + case <-r.Context().Done(): + return + } + } +} + func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { // This is only really for tests - this will never be nil IRL. 
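A sketch of consuming the new live-notifications SSE endpoint above from Go, assuming the v1 API is mounted under /api/v1 and the server listens on localhost:9090:

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"
)

// Mirrors the JSON shape of notifications.Notification (text/date/active tags).
type notification struct {
	Text   string    `json:"text"`
	Date   time.Time `json:"date"`
	Active bool      `json:"active"`
}

func main() {
	resp, err := http.Get("http://localhost:9090/api/v1/notifications/live")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue // skip the blank separator lines between SSE events
		}
		var n notification
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &n); err != nil {
			panic(err)
		}
		fmt.Printf("%s active=%t (%s)\n", n.Text, n.Active, n.Date.Format(time.RFC3339))
	}
}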
if api.remoteReadHandler != nil { @@ -1679,7 +1830,7 @@ func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) { if api.otlpWriteHandler != nil { api.otlpWriteHandler.ServeHTTP(w, r) } else { - http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound) + http.Error(w, "otlp write receiver needs to be enabled with --web.enable-otlp-receiver", http.StatusNotFound) } } @@ -1782,7 +1933,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling response", "url", req.URL, "err", err) + api.logger.Error("error marshaling response", "url", req.URL, "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1790,7 +1941,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface w.Header().Set("Content-Type", codec.ContentType().String()) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "url", req.URL, "bytesWritten", n, "err", err) + api.logger.Error("error writing response", "url", req.URL, "bytesWritten", n, "err", err) } } @@ -1820,7 +1971,7 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter Data: data, }) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) + api.logger.Error("error marshaling json response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1848,7 +1999,7 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) + api.logger.Error("error writing response", "bytesWritten", n, "err", err) } } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go b/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go new file mode 100644 index 00000000000..afa11f16b9d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go @@ -0,0 +1,157 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "strconv" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +// Take a Go PromQL AST and translate it to an object that's nicely JSON-serializable +// for the tree view in the UI. +// TODO: Could it make sense to do this via the normal JSON marshalling methods? Maybe +// too UI-specific though. 
+func translateAST(node parser.Expr) interface{} { + if node == nil { + return nil + } + + switch n := node.(type) { + case *parser.AggregateExpr: + return map[string]interface{}{ + "type": "aggregation", + "op": n.Op.String(), + "expr": translateAST(n.Expr), + "param": translateAST(n.Param), + "grouping": sanitizeList(n.Grouping), + "without": n.Without, + } + case *parser.BinaryExpr: + var matching interface{} + if m := n.VectorMatching; m != nil { + matching = map[string]interface{}{ + "card": m.Card.String(), + "labels": sanitizeList(m.MatchingLabels), + "on": m.On, + "include": sanitizeList(m.Include), + } + } + + return map[string]interface{}{ + "type": "binaryExpr", + "op": n.Op.String(), + "lhs": translateAST(n.LHS), + "rhs": translateAST(n.RHS), + "matching": matching, + "bool": n.ReturnBool, + } + case *parser.Call: + args := []interface{}{} + for _, arg := range n.Args { + args = append(args, translateAST(arg)) + } + + return map[string]interface{}{ + "type": "call", + "func": map[string]interface{}{ + "name": n.Func.Name, + "argTypes": n.Func.ArgTypes, + "variadic": n.Func.Variadic, + "returnType": n.Func.ReturnType, + }, + "args": args, + } + case *parser.MatrixSelector: + vs := n.VectorSelector.(*parser.VectorSelector) + return map[string]interface{}{ + "type": "matrixSelector", + "name": vs.Name, + "range": n.Range.Milliseconds(), + "offset": vs.OriginalOffset.Milliseconds(), + "matchers": translateMatchers(vs.LabelMatchers), + "timestamp": vs.Timestamp, + "startOrEnd": getStartOrEnd(vs.StartOrEnd), + } + case *parser.SubqueryExpr: + return map[string]interface{}{ + "type": "subquery", + "expr": translateAST(n.Expr), + "range": n.Range.Milliseconds(), + "offset": n.OriginalOffset.Milliseconds(), + "step": n.Step.Milliseconds(), + "timestamp": n.Timestamp, + "startOrEnd": getStartOrEnd(n.StartOrEnd), + } + case *parser.NumberLiteral: + return map[string]string{ + "type": "numberLiteral", + "val": strconv.FormatFloat(n.Val, 'f', -1, 64), + } + case *parser.ParenExpr: + return map[string]interface{}{ + "type": "parenExpr", + "expr": translateAST(n.Expr), + } + case *parser.StringLiteral: + return map[string]interface{}{ + "type": "stringLiteral", + "val": n.Val, + } + case *parser.UnaryExpr: + return map[string]interface{}{ + "type": "unaryExpr", + "op": n.Op.String(), + "expr": translateAST(n.Expr), + } + case *parser.VectorSelector: + return map[string]interface{}{ + "type": "vectorSelector", + "name": n.Name, + "offset": n.OriginalOffset.Milliseconds(), + "matchers": translateMatchers(n.LabelMatchers), + "timestamp": n.Timestamp, + "startOrEnd": getStartOrEnd(n.StartOrEnd), + } + } + panic("unsupported node type") +} + +func sanitizeList(l []string) []string { + if l == nil { + return []string{} + } + return l +} + +func translateMatchers(in []*labels.Matcher) interface{} { + out := []map[string]interface{}{} + for _, m := range in { + out = append(out, map[string]interface{}{ + "name": m.Name, + "value": m.Value, + "type": m.Type.String(), + }) + } + return out +} + +func getStartOrEnd(startOrEnd parser.ItemType) interface{} { + if startOrEnd == 0 { + return nil + } + + return startOrEnd.String() +} diff --git a/vendor/github.com/tjhop/slog-gokit/.goreleaser.yaml b/vendor/github.com/tjhop/slog-gokit/.goreleaser.yaml new file mode 100644 index 00000000000..f092384c215 --- /dev/null +++ b/vendor/github.com/tjhop/slog-gokit/.goreleaser.yaml @@ -0,0 +1,29 @@ +version: 2 + +builds: +- skip: true +gomod: + proxy: true + mod: mod +checksum: + name_template: 'checksums.txt' 
+snapshot: + version_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^Merge pull request' + - '^ci(?:\(\w+\))?\!?:' + - '^docs(?:\(\w+\))?\!?:' + - '^test(?:\(\w+\))?\!?:' + - '^style(?:\(\w+\))?\!?:' + groups: + - title: "New Features And Changes" + regexp: '^.*?feat(\([[:word:]]+\))??!?:.+$' + order: 0 + - title: "Fixes" + regexp: '^.*?fix(\([[:word:]]+\))??!?:.+$' + order: 1 + - title: "Other Changes" + order: 999 diff --git a/vendor/github.com/tjhop/slog-gokit/LICENSE b/vendor/github.com/tjhop/slog-gokit/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/tjhop/slog-gokit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/tjhop/slog-gokit/Makefile b/vendor/github.com/tjhop/slog-gokit/Makefile new file mode 100644 index 00000000000..dce8bb04e1c --- /dev/null +++ b/vendor/github.com/tjhop/slog-gokit/Makefile @@ -0,0 +1,27 @@ +GOCMD := go +GOFMT := ${GOCMD} fmt +GOMOD := ${GOCMD} mod +GOTEST := ${GOCMD} test +GOLANGCILINT_CACHE := ${CURDIR}/.golangci-lint/build/cache + +# autogenerate help messages for comment lines with 2 `#` +.PHONY: help +help: ## print this help message + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n\nTargets:\n"} /^[a-z0-9A-Z_-]+:.*?##/ { printf " \033[36m%-30s\033[0m%s\n", $$1, $$2 }' $(MAKEFILE_LIST) + +.PHONY: tidy +tidy: ## tidy modules + ${GOMOD} tidy + +.PHONY: fmt +fmt: ## apply go code style formatter + ${GOFMT} -x ./... + +.PHONY: lint +lint: ## run linters + mkdir -p ${GOLANGCILINT_CACHE} || true + docker run --rm -v ${CURDIR}:/app -v ${GOLANGCILINT_CACHE}:/root/.cache -w /app docker.io/golangci/golangci-lint:latest golangci-lint run -v + +.PHONY: test +test: ## run go tests + ${GOTEST} -race -v . diff --git a/vendor/github.com/tjhop/slog-gokit/README.md b/vendor/github.com/tjhop/slog-gokit/README.md new file mode 100644 index 00000000000..d6b5e51de6e --- /dev/null +++ b/vendor/github.com/tjhop/slog-gokit/README.md @@ -0,0 +1,40 @@ +# Go slog-gokit Adapter + +This library provides a custom slog.Handler that wraps a go-kit Logger, so that loggers created via `slog.New()` chain their log calls to the internal go-kit Logger. + +## Install + +```bash +go get github.com/tjhop/slog-gokit +``` + +## Example + +```go +package main + +import ( + "log/slog" + "os" + + "github.com/go-kit/log" + slgk "github.com/tjhop/slog-gokit" +) + +func main() { + // Take an existing go-kit/log Logger: + gklogger := log.NewLogfmtLogger(os.Stderr) + + // Create an slog Logger that chains log calls to the go-kit/log Logger: + slogger := slog.New(slgk.NewGoKitHandler(gklogger, nil)) + slogger.WithGroup("example_group").With("foo", "bar").Info("hello world") + + // The slog Logger produced logs at slog.LevelInfo by default. + // Optionally create an slog.Leveler to dynamically adjust the level of + // the slog Logger. 
+ lvl := &slog.LevelVar{} + lvl.Set(slog.LevelDebug) + slogger = slog.New(slgk.NewGoKitHandler(gklogger, lvl)) + slogger.WithGroup("example_group").With("foo", "bar").Info("hello world") +} +``` diff --git a/vendor/github.com/tjhop/slog-gokit/handler.go b/vendor/github.com/tjhop/slog-gokit/handler.go new file mode 100644 index 00000000000..a926595292f --- /dev/null +++ b/vendor/github.com/tjhop/slog-gokit/handler.go @@ -0,0 +1,139 @@ +package sloggokit + +import ( + "context" + "log/slog" + "os" + + "github.com/go-kit/log" +) + +var _ slog.Handler = (*GoKitHandler)(nil) + +var defaultGoKitLogger = log.NewLogfmtLogger(os.Stderr) + +// GoKitHandler implements the slog.Handler interface. It holds an internal +// go-kit logger that is used to perform the true logging. +type GoKitHandler struct { + level slog.Leveler + logger log.Logger + preformatted []any + group string +} + +// NewGoKitHandler returns a new slog logger from the provided go-kit +// logger. Calls to the slog logger are chained to the handler's internal +// go-kit logger. If provided a level, it will be used to filter log events in +// the handler's Enabled() method. +func NewGoKitHandler(logger log.Logger, level slog.Leveler) slog.Handler { + if logger == nil { + logger = defaultGoKitLogger + } + + // Adjust runtime call depth to compensate for the adapter and point to + // the appropriate source line. + logger = log.With(logger, "caller", log.Caller(6)) + + if level == nil { + level = &slog.LevelVar{} // Info level by default. + } + + return &GoKitHandler{logger: logger, level: level} +} + +func (h *GoKitHandler) Enabled(_ context.Context, level slog.Level) bool { + if h.level == nil { + h.level = &slog.LevelVar{} // Info level by default. + } + + return level >= h.level.Level() +} + +func (h *GoKitHandler) Handle(_ context.Context, record slog.Record) error { + if h.logger == nil { + h.logger = defaultGoKitLogger + } + + logger := goKitLevelFunc(h.logger, record.Level) + + // 1 slog.Attr == 1 key and 1 value, set capacity >= (2 * num attrs). + // + // Note: this could probably be (micro)-optimized further -- we know we + // need to also append on a timestamp from the record, the message, the + // preformatted vals, all things we more or less know the size of at + // creation time here. + pairs := make([]any, 0, (2 * record.NumAttrs())) + if !record.Time.IsZero() { + pairs = append(pairs, "time", record.Time) + } + pairs = append(pairs, "msg", record.Message) + pairs = append(pairs, h.preformatted...) + + record.Attrs(func(a slog.Attr) bool { + pairs = appendPair(pairs, h.group, a) + return true + }) + + return logger.Log(pairs...) +} + +func (h *GoKitHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + pairs := make([]any, 0, 2*len(attrs)) + for _, a := range attrs { + pairs = appendPair(pairs, h.group, a) + } + + if h.preformatted != nil { + pairs = append(h.preformatted, pairs...) + } + + return &GoKitHandler{ + logger: h.logger, + level: h.level, + preformatted: pairs, + group: h.group, + } +} + +func (h *GoKitHandler) WithGroup(name string) slog.Handler { + if name == "" { + return h + } + + g := name + if h.group != "" { + g = h.group + "." + g + } + + return &GoKitHandler{ + logger: h.logger, + level: h.level, + preformatted: h.preformatted, + group: g, + } +} + +func appendPair(pairs []any, groupPrefix string, attr slog.Attr) []any { + if attr.Equal(slog.Attr{}) { + return pairs + } + + switch attr.Value.Kind() { + case slog.KindGroup: + if attr.Key != "" { + groupPrefix = groupPrefix + "." 
+ attr.Key + } + for _, a := range attr.Value.Group() { + pairs = appendPair(pairs, groupPrefix, a) + } + default: + key := attr.Key + if groupPrefix != "" { + key = groupPrefix + "." + key + } + + pairs = append(pairs, key, attr.Value) + } + + return pairs +} diff --git a/vendor/github.com/tjhop/slog-gokit/level.go b/vendor/github.com/tjhop/slog-gokit/level.go new file mode 100644 index 00000000000..467d991a9c3 --- /dev/null +++ b/vendor/github.com/tjhop/slog-gokit/level.go @@ -0,0 +1,23 @@ +package sloggokit + +import ( + "log/slog" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) + +func goKitLevelFunc(logger log.Logger, lvl slog.Level) log.Logger { + switch lvl { + case slog.LevelInfo: + logger = level.Info(logger) + case slog.LevelWarn: + logger = level.Warn(logger) + case slog.LevelError: + logger = level.Error(logger) + default: + logger = level.Debug(logger) + } + + return logger +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go index 67e03f24810..6ef23721cb7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go @@ -215,6 +215,10 @@ func (ct *clientTracer) start(hook, spanName string, attrs ...attribute.KeyValue func (ct *clientTracer) end(hook string, err error, attrs ...attribute.KeyValue) { if !ct.useSpans { + // sometimes end may be called without previous start + if ct.root == nil { + ct.root = trace.SpanFromContext(ct.Context) + } if err != nil { attrs = append(attrs, attribute.String(hook+".error", err.Error())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go index 16d9fc97bae..a978bb5b288 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go @@ -5,7 +5,7 @@ package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net // Version is the current release version of the httptrace instrumentation. func Version() string { - return "0.55.0" + return "0.56.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b7b..a83a026274a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. 
type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b77..e4236ab398c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) { h.semconv = semconv.NewHTTPServer(c.Meter) } -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -190,14 +184,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9cae4cab86a..fb893b25042 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -83,18 +83,26 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -type MetricData struct { - ServerName string +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { Req *http.Request StatusCode int AdditionalAttributes []attribute.KeyValue +} - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 +type MetricData struct { + RequestSize int64 + ElapsedTime float64 } -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { // This will happen if an HTTPServer{} is used insted of NewHTTPServer. return @@ -102,7 +110,7 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. + addOpts := []metric.AddOption{o} s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) 
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) @@ -122,11 +130,20 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { type HTTPClient struct { duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram } -func NewHTTPClient() HTTPClient { +func NewHTTPClient(meter metric.Meter) HTTPClient { env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. @@ -163,3 +180,48 @@ func (c HTTPClient) ErrorType(err error) attribute.KeyValue { return attribute.KeyValue{} } + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). 
+ return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e675..5367732ec5d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + standardizeHTTPMethodMetric(req.Method), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,16 +164,6 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS @@ -190,3 +180,95 @@ func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } + +func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Incoming request bytes total + clientResponseSize = "http.client.response.size" // Incoming response bytes total + clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds +) + +func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d3438b..39681ad4b09 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) 
t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) - if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 1133961d393..a07d8689d47 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.55.0" + return "0.56.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a5f904197fe..d09555506f7 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -25,6 +25,7 @@ linters: - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused @@ -302,3 +303,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index fb107426e76..4b361d0269c 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,35 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + ## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 ### Added @@ -3081,7 +3110,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 5904bb7070e..945a07d2b07 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 91580725350..bb339655743 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -631,11 +631,8 @@ should be canceled. ### Approvers -- [Chester Cheung](https://github.com/hanyuancheung), Tencent - ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert PajÄ…k](https://github.com/pellared), Splunk @@ -644,11 +641,13 @@ should be canceled. 
### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b04695b242f..a1228a21240 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 9a65707038c..efec278905b 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -89,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 59992984d42..ffa9b61258a 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -111,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. 
- ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdbb9..6cbefceadfe 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index f2fc3929b11..e3db438a09f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -152,14 +152,17 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return m.delegate.Int64Counter(name, options...) } - i := &siCounter{name: name, opts: options} cfg := metric.NewInt64CounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } + i := &siCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -172,14 +175,17 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCou return m.delegate.Int64UpDownCounter(name, options...) } - i := &siUpDownCounter{name: name, opts: options} cfg := metric.NewInt64UpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } + i := &siUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -192,14 +198,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return m.delegate.Int64Histogram(name, options...) } - i := &siHistogram{name: name, opts: options} cfg := metric.NewInt64HistogramConfig(options...) 
id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siHistogram)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } + i := &siHistogram{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -212,14 +221,17 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return m.delegate.Int64Gauge(name, options...) } - i := &siGauge{name: name, opts: options} cfg := metric.NewInt64GaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -232,14 +244,17 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return m.delegate.Int64ObservableCounter(name, options...) } - i := &aiCounter{name: name, opts: options} cfg := metric.NewInt64ObservableCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } + i := &aiCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -252,14 +267,17 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return m.delegate.Int64ObservableUpDownCounter(name, options...) } - i := &aiUpDownCounter{name: name, opts: options} cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } + i := &aiUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -272,14 +290,17 @@ func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64Observa return m.delegate.Int64ObservableGauge(name, options...) } - i := &aiGauge{name: name, opts: options} cfg := metric.NewInt64ObservableGaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } + i := &aiGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -292,14 +313,17 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return m.delegate.Float64Counter(name, options...) } - i := &sfCounter{name: name, opts: options} cfg := metric.NewFloat64CounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } + i := &sfCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -312,14 +336,17 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return m.delegate.Float64UpDownCounter(name, options...) } - i := &sfUpDownCounter{name: name, opts: options} cfg := metric.NewFloat64UpDownCounterConfig(options...) 
id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } + i := &sfUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -332,14 +359,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return m.delegate.Float64Histogram(name, options...) } - i := &sfHistogram{name: name, opts: options} cfg := metric.NewFloat64HistogramConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfHistogram)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } + i := &sfHistogram{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -352,14 +382,17 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return m.delegate.Float64Gauge(name, options...) } - i := &sfGauge{name: name, opts: options} cfg := metric.NewFloat64GaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -372,14 +405,17 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return m.delegate.Float64ObservableCounter(name, options...) } - i := &afCounter{name: name, opts: options} cfg := metric.NewFloat64ObservableCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } + i := &afCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -392,14 +428,17 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return m.delegate.Float64ObservableUpDownCounter(name, options...) } - i := &afUpDownCounter{name: name, opts: options} cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } + i := &afUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -412,14 +451,17 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64Obs return m.delegate.Float64ObservableGauge(name, options...) } - i := &afGauge{name: name, opts: options} cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } + i := &afGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -487,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) { reg, err := m.RegisterCallback(c.function, insts...) 
if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 9b1da2c02b9..b2fe3e41d3b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,7 +20,8 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e402331..a535782e1d9 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 4d36b98cf48..0a29a2f13d8 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -23,6 +23,10 @@ { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fce8..00000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . 
-mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 78b40f3ed24..6d3c7b1f40e 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.30.0" + return "1.31.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 0c32f4fc46e..cdebdb5eb78 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.30.0 + version: v1.31.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,12 +29,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.52.0 + version: v0.53.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.6.0 + version: v0.7.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +42,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.9 + version: v0.0.10 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/modules.txt b/vendor/modules.txt index f90b22e16df..f482784ecde 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -486,7 +486,7 @@ github.com/gogo/status # github.com/golang-jwt/jwt/v5 v5.2.1 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 -# github.com/golang/glog v1.2.1 +# github.com/golang/glog v1.2.2 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -1003,12 +1003,13 @@ github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/helpers/templates github.com/prometheus/common/model +github.com/prometheus/common/promslog github.com/prometheus/common/route github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.12.0 +# github.com/prometheus/exporter-toolkit v0.13.0 ## explicit; go 1.22 github.com/prometheus/exporter-toolkit/web # github.com/prometheus/procfs v0.15.1 @@ -1016,7 
+1017,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20241120160701-db938c3ceac8 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20241121154025-fd7e39a8798e ## explicit; go 1.22.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1063,10 +1064,12 @@ github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wlog github.com/prometheus/prometheus/util/almost github.com/prometheus/prometheus/util/annotations +github.com/prometheus/prometheus/util/convertnhcb github.com/prometheus/prometheus/util/gate github.com/prometheus/prometheus/util/httputil github.com/prometheus/prometheus/util/jsonutil github.com/prometheus/prometheus/util/logging +github.com/prometheus/prometheus/util/notifications github.com/prometheus/prometheus/util/osutil github.com/prometheus/prometheus/util/pool github.com/prometheus/prometheus/util/stats @@ -1152,6 +1155,9 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing/opentracing +# github.com/tjhop/slog-gokit v0.1.2 +## explicit; go 1.21 +github.com/tjhop/slog-gokit # github.com/tklauser/go-sysconf v0.3.12 ## explicit; go 1.13 github.com/tklauser/go-sysconf @@ -1286,24 +1292,24 @@ go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp -# go.opentelemetry.io/collector/semconv v0.108.1 +# go.opentelemetry.io/collector/semconv v0.110.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/semconv/v1.6.1 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 ## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 ## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.30.0 +# go.opentelemetry.io/otel v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1319,16 +1325,14 @@ go.opentelemetry.io/otel/semconv/v1.18.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/metric v1.30.0 +# go.opentelemetry.io/otel/metric v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded 
go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.30.0 -## explicit; go 1.22 # go.opentelemetry.io/otel/sdk/metric v1.30.0 ## explicit; go 1.22 -# go.opentelemetry.io/otel/trace v1.30.0 +# go.opentelemetry.io/otel/trace v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1613,10 +1617,10 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 ## explicit gopkg.in/yaml.v3 -# k8s.io/apimachinery v0.31.0 +# k8s.io/apimachinery v0.31.1 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/util/runtime -# k8s.io/client-go v0.31.0 +# k8s.io/client-go v0.31.1 ## explicit; go 1.22.0 k8s.io/client-go/tools/metrics k8s.io/client-go/util/workqueue @@ -1681,7 +1685,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241120160701-db938c3ceac8 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241121154025-fd7e39a8798e # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b
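
The `computeDistinctFixed` rewrite in `attribute/set.go` relies on Go 1.20+ slice-to-array conversion, which replaces the allocate-and-copy idiom with a single expression. Below is a minimal sketch of the pattern, assuming a placeholder `pair` type instead of `attribute.KeyValue`; it is only an illustration of the conversion, not the vendored code itself.

```go
package main

import "fmt"

// pair stands in for attribute.KeyValue; the real type lives in
// go.opentelemetry.io/otel/attribute.
type pair struct{ k, v string }

// oldCopy mirrors the removed code: allocate a fixed-size array, copy into it.
func oldCopy(kvs []pair) interface{} {
	ptr := new([2]pair)
	copy((*ptr)[:], kvs)
	return *ptr
}

// newConvert mirrors the added code: Go 1.20+ converts a slice directly to an
// array value. It panics if len(kvs) < 2, but computeDistinctFixed switches on
// len(kvs) first, so the length always matches.
func newConvert(kvs []pair) interface{} {
	return [2]pair(kvs)
}

func main() {
	kvs := []pair{{"a", "1"}, {"b", "2"}}
	// Both paths produce the same comparable array value.
	fmt.Println(oldCopy(kvs) == newConvert(kvs)) // true
}
```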
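The `internal/global/meter.go` changes add lookup-before-create deduplication: each instrument constructor now builds its `instID` key from a typed nil pointer (`reflect.TypeOf((*siCounter)(nil))` and friends), checks the `instruments` map, and only allocates a new instrument on a miss. The sketch below shows the shape of that flow with a simplified key and a hypothetical `counter` type; the real key also carries description and unit.

```go
package main

import (
	"fmt"
	"reflect"
)

// instID is a simplified version of the deduplication key.
type instID struct {
	name string
	kind reflect.Type
}

type counter struct{ name string }

type meter struct {
	instruments map[instID]any
}

// int64Counter sketches the new flow: the key is built from a typed nil
// pointer, so nothing is allocated when the instrument already exists.
func (m *meter) int64Counter(name string) *counter {
	id := instID{name: name, kind: reflect.TypeOf((*counter)(nil))}
	if c, ok := m.instruments[id]; ok {
		return c.(*counter)
	}
	c := &counter{name: name}
	m.instruments[id] = c
	return c
}

func main() {
	m := &meter{instruments: map[instID]any{}}
	a := m.int64Counter("requests_total")
	b := m.int64Counter("requests_total")
	fmt.Println(a == b) // true: the second call returns the cached instrument
}
```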
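The one-line `return` added to `registration.setDelegate` is a nil-dereference guard: if `RegisterCallback` fails, the error is handled and the function exits before touching the nil `Registration`. A minimal sketch of that guard, using a stand-in `Registration` interface rather than the real `metric.Registration`:

```go
package main

import "fmt"

// Registration stands in for metric.Registration.
type Registration interface{ Unregister() error }

type callbackTarget struct{ unreg func() error }

// setDelegate sketches the fixed flow: on error, bail out instead of
// dereferencing the nil Registration on the next line.
func (c *callbackTarget) setDelegate(register func() (Registration, error)) {
	reg, err := register()
	if err != nil {
		fmt.Println("register failed:", err) // the real code hands err to the global ErrorHandler
		return                               // without this return, reg.Unregister below panics
	}
	c.unreg = reg.Unregister
}

func main() {
	c := &callbackTarget{}
	c.setDelegate(func() (Registration, error) { return nil, fmt.Errorf("no delegate meter") })
	fmt.Println(c.unreg == nil) // true: nothing stored, no panic
}
```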