From fd9c1e0b7bf1d9c24122e98bdec1747dd5094331 Mon Sep 17 00:00:00 2001 From: Yuri Nikolic Date: Thu, 9 Mar 2023 18:03:31 +0100 Subject: [PATCH 1/6] Making the number of CPUs used for WAL replay configurable --- CHANGELOG.md | 2 + cmd/mimir/config-descriptor.json | 13 +- cmd/mimir/help-all.txt.tmpl | 4 +- .../configuration-parameters/index.md | 7 +- go.mod | 6 +- go.sum | 13 +- pkg/ingester/ingester.go | 149 ++-- pkg/ingester/ingester_test.go | 135 ++- pkg/ingester/shipper.go | 2 +- pkg/storage/tsdb/config.go | 24 +- pkg/storage/tsdb/config_test.go | 2 +- .../aws/credentials/processcreds/provider.go | 24 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 549 +++++++++++- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/sns/api.go | 72 +- .../prometheus/prometheus/config/config.go | 132 ++- .../prometheus/model/labels/labels.go | 12 +- .../prometheus/model/labels/labels_string.go | 788 ++++++++++++++++++ .../prometheus/model/relabel/relabel.go | 17 +- .../model/textparse/openmetricsparse.go | 77 +- .../prometheus/model/textparse/promparse.go | 36 +- .../prompb/io/prometheus/client/metrics.pb.go | 187 +++-- .../prompb/io/prometheus/client/metrics.proto | 39 +- .../prometheus/prometheus/prompb/types.pb.go | 150 ++-- .../prometheus/prometheus/prompb/types.proto | 8 +- .../prometheus/prometheus/promql/value.go | 2 +- .../prometheus/prometheus/rules/alerting.go | 2 + .../prometheus/prometheus/rules/origin.go | 69 ++ .../prometheus/prometheus/rules/recording.go | 2 + .../prometheus/prometheus/scrape/manager.go | 7 +- .../prometheus/prometheus/scrape/scrape.go | 2 +- .../prometheus/storage/remote/codec.go | 8 +- .../storage/remote/queue_manager.go | 4 + .../prometheus/prometheus/tsdb/db.go | 7 + .../prometheus/prometheus/tsdb/head.go | 164 ++-- .../prometheus/prometheus/tsdb/head_read.go | 45 +- .../prometheus/prometheus/tsdb/head_wal.go | 73 +- .../prometheus/prometheus/tsdb/test.txt | 0 .../prometheus/tsdb/tsdbutil/chunks.go | 16 +- .../prometheus/tsdb/tsdbutil/histogram.go | 110 +++ .../prometheus/prometheus/tsdb/wlog/wlog.go | 17 +- .../prometheus/util/strutil/strconv.go | 21 + .../prometheus/prometheus/web/api/v1/api.go | 22 +- vendor/go.uber.org/goleak/CHANGELOG.md | 10 +- vendor/go.uber.org/goleak/Makefile | 10 +- vendor/go.uber.org/goleak/README.md | 5 +- .../go.uber.org/goleak/internal/stack/doc.go | 22 + vendor/modules.txt | 8 +- 48 files changed, 2445 insertions(+), 631 deletions(-) create mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels_string.go create mode 100644 vendor/github.com/prometheus/prometheus/rules/origin.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/test.txt create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go create mode 100644 vendor/go.uber.org/goleak/internal/stack/doc.go diff --git a/CHANGELOG.md b/CHANGELOG.md index b6861c72d13..6d26ae23f67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ * `cortex_bucket_store_series_get_all_duration_seconds` * `cortex_bucket_store_series_merge_duration_seconds` * [CHANGE] Ingester: changed default value of `-blocks-storage.tsdb.retention-period` from `24h` to `13h`. If you're running Mimir with a custom configuration and you're overriding `-querier.query-store-after` to a value greater than the default `12h` then you should increase `-blocks-storage.tsdb.retention-period` accordingly. 
#4382
+* [CHANGE] Ingester: the configuration parameter `-blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup` has been deprecated and will be removed in Mimir 2.10.
 * [FEATURE] Cache: Introduce experimental support for using Redis for results, chunks, index, and metadata caches. #4371
 * [ENHANCEMENT] Allow to define service name used for tracing via `JAEGER_SERVICE_NAME` environment variable. #4394
 * [ENHANCEMENT] Querier and query-frontend: add experimental, more performant protobuf query result response format enabled with `-query-frontend.query-result-response-format=protobuf`. #4304 #4318 #4375
@@ -19,6 +20,7 @@
 * [ENHANCEMENT] Querier and store-gateway: optimized `.*` and `.+` regular expression label matchers. #4432
 * [ENHANCEMENT] Query-frontend: results cache TTL is now configurable by using `-query-frontend.results-cache-ttl` and `-query-frontend.results-cache-ttl-for-out-of-order-time-window` options. These values can also be specified per tenant. Default values are unchanged (7 days and 10 minutes respectively). #4385
+* [ENHANCEMENT] Ingester: added advanced configuration parameter `-blocks-storage.tsdb.wal-replay-concurrency` that sets the maximum number of CPUs used during WAL replay. 0 means disabled.
 * [BUGFIX] Querier: Streaming remote read will now continue to return multiple chunks per frame after the first frame. #4423
 
 ### Mixin
 
diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json
index 15e1258086d..a872a6f5f1d 100644
--- a/cmd/mimir/config-descriptor.json
+++ b/cmd/mimir/config-descriptor.json
@@ -7089,6 +7089,17 @@
       "fieldType": "int",
       "fieldCategory": "advanced"
     },
+    {
+      "kind": "field",
+      "name": "wal_replay_concurrency",
+      "required": false,
+      "desc": "Maximum number of CPUs that can simultaneously process WAL replay. 0 means disabled.",
+      "fieldValue": null,
+      "fieldDefaultValue": 0,
+      "fieldFlag": "blocks-storage.tsdb.wal-replay-concurrency",
+      "fieldType": "int",
+      "fieldCategory": "advanced"
+    },
     {
       "kind": "field",
       "name": "flush_blocks_on_shutdown",
@@ -7153,7 +7164,7 @@
       "fieldDefaultValue": 10,
       "fieldFlag": "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup",
       "fieldType": "int",
-      "fieldCategory": "advanced"
+      "fieldCategory": "deprecated"
     },
     {
       "kind": "field",
diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl
index 8bed13a8cba..221e807f8f0 100644
--- a/cmd/mimir/help-all.txt.tmpl
+++ b/cmd/mimir/help-all.txt.tmpl
@@ -662,7 +662,7 @@ Usage of ./cmd/mimir/mimir:
   -blocks-storage.tsdb.head-postings-for-matchers-cache-ttl duration
     	[experimental] How long to cache postings for matchers in the Head and OOOHead. 0 disables the cache and just deduplicates the in-flight calls. (default 10s)
   -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup int
-    	limit the number of concurrently opening TSDB's on startup (default 10)
+    	[deprecated] limit the number of concurrently opening TSDBs on startup (default 10)
   -blocks-storage.tsdb.memory-snapshot-on-shutdown
     	[experimental] True to enable snapshotting of in-memory TSDB data on disk when shutting down.
   -blocks-storage.tsdb.out-of-order-capacity-max int
@@ -679,6 +679,8 @@ Usage of ./cmd/mimir/mimir:
     	The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance. (default 16384)
   -blocks-storage.tsdb.wal-compression-enabled
     	True to enable TSDB WAL compression.
+  -blocks-storage.tsdb.wal-replay-concurrency int
+    	Maximum number of CPUs that can simultaneously process WAL replay.
0 means disabled.
   -blocks-storage.tsdb.wal-segment-size-bytes int
     	TSDB WAL segments files max size (bytes). (default 134217728)
   -common.storage.azure.account-key string
diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md
index b5c47c17613..69ff7cb6f97 100644
--- a/docs/sources/mimir/references/configuration-parameters/index.md
+++ b/docs/sources/mimir/references/configuration-parameters/index.md
@@ -3267,6 +3267,11 @@ tsdb:
   # CLI flag: -blocks-storage.tsdb.wal-segment-size-bytes
   [wal_segment_size_bytes: <int> | default = 134217728]
 
+  # (advanced) Maximum number of CPUs that can simultaneously process WAL
+  # replay. 0 means disabled.
+  # CLI flag: -blocks-storage.tsdb.wal-replay-concurrency
+  [wal_replay_concurrency: <int> | default = 0]
+
   # (advanced) True to flush blocks to storage on shutdown. If false, incomplete
   # blocks will be reused after restart.
   # CLI flag: -blocks-storage.tsdb.flush-blocks-on-shutdown
@@ -3299,7 +3304,7 @@ tsdb:
   # CLI flag: -blocks-storage.tsdb.series-hash-cache-max-size-bytes
   [series_hash_cache_max_size_bytes: <int> | default = 1073741824]
 
-  # (advanced) limit the number of concurrently opening TSDB's on startup
+  # (deprecated) limit the number of concurrently opening TSDBs on startup
   # CLI flag: -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup
   [max_tsdb_opening_concurrency_on_startup: <int> | default = 10]
 
diff --git a/go.mod b/go.mod
index f44f56e87d7..26217a5d228 100644
--- a/go.mod
+++ b/go.mod
@@ -39,7 +39,7 @@ require (
 	github.com/uber/jaeger-client-go v2.30.0+incompatible
 	github.com/weaveworks/common v0.0.0-20230119144549-0aaa5abd1e63
 	go.uber.org/atomic v1.10.0
-	go.uber.org/goleak v1.2.0
+	go.uber.org/goleak v1.2.1
 	golang.org/x/crypto v0.3.0
 	golang.org/x/net v0.7.0
 	golang.org/x/sync v0.1.0
@@ -88,7 +88,7 @@ require (
 	github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
 	github.com/armon/go-metrics v0.4.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
-	github.com/aws/aws-sdk-go v1.44.187 // indirect
+	github.com/aws/aws-sdk-go v1.44.207 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.16.0 // indirect
 	github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.11.0 // indirect
@@ -228,7 +228,9 @@ require (
 )
 
 // Using a fork of Prometheus with Mimir-specific changes.
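+// The pinned mimir-prometheus version below includes the tsdb.Options.WALReplayConcurrency
+// option that the ingester uses to bound the number of CPUs used during WAL replay.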
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230309083841-242e82b8e667 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230309145355-024edcdda34c // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index 739ea8109c1..06d1c78b73a 100644 --- a/go.sum +++ b/go.sum @@ -129,8 +129,8 @@ github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA= -github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.207 h1:7O0AMKxTm+/GUx6zw+3dqc+fD3tTzv8xaZPYo+ywRwE= +github.com/aws/aws-sdk-go v1.44.207/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.16.0 h1:cBAYjiiexRAg9v2z9vb6IdxAa7ef4KCtjW7w7e3GxGo= github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI= github.com/aws/aws-sdk-go-v2/config v1.15.1 h1:hTIZFepYESYyowQUBo47lu69WSxsYqGUILY9Nu8+7pY= @@ -505,8 +505,8 @@ github.com/grafana/gomemcache v0.0.0-20230221082510-6cde04bf2270 h1:cj3uiNKskh+/ github.com/grafana/gomemcache v0.0.0-20230221082510-6cde04bf2270/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20230309083841-242e82b8e667 h1:fsY1JTaeHdpU2PZKrw6U5jazlaCoG8CE4As6Q0268LI= -github.com/grafana/mimir-prometheus v0.0.0-20230309083841-242e82b8e667/go.mod h1:XHIzUaYXL352XOhSF/R0eZ+/k2x6u5de/d/X9VWwVnI= +github.com/grafana/mimir-prometheus v0.0.0-20230309145355-024edcdda34c h1:4iR9RM+tvyhPWQJ5ct7x0cP1DeuAyJt4yh9dEYPyGDk= +github.com/grafana/mimir-prometheus v0.0.0-20230309145355-024edcdda34c/go.mod h1:eNd62DoOh4+xRKDW2mK5qb8RAKMSsOpw96ZyUqdlR4E= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6 h1:A3dhViTeFDSQcGOXuUi6ukCQSMyDtDISBp2z6OOo2YM= github.com/grafana/regexp v0.0.0-20221005093135-b4c2bcb0a4b6/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -962,8 +962,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= 
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
@@ -1018,7 +1018,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
 golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 6b26c47e667..7038b1db43d 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -109,6 +109,9 @@ const (
 	tenantsWithOutOfOrderEnabledStatName   = "ingester_ooo_enabled_tenants"
 	minOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_min_window"
 	maxOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_max_window"
+
+	// maximum number of TSDBs present on the filesystem up to which, when wal-replay-concurrency is set, TSDBs are opened sequentially with concurrent WAL replay within each; above this threshold, TSDBs are opened concurrently and each WAL is replayed sequentially
+	maxTSDBOpenWithoutConcurrency = 10
 )
 
 // BlocksUploader interface is used to have an easy way to mock it in tests.
@@ -1690,8 +1693,12 @@ func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error)
 	return db, nil
 }
 
+type tsdbOption func(*tsdb.Options)
+
 // createTSDB creates a TSDB for a given userID, and returns the created db.
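+// Any additionalTsdbOptions are applied to the tsdb.Options computed from the ingester
+// configuration before the TSDB is opened; openExistingTSDB uses this to set WALReplayConcurrency.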
-func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
+func (i *Ingester) createTSDB(userID string, additionalTsdbOptions ...tsdbOption) (*userTSDB, error) {
 	tsdbPromReg := prometheus.NewRegistry()
 	udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID)
 	userLogger := util_log.WithUserID(userID, i.logger)
@@ -1712,8 +1719,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
 	maxExemplars := i.limiter.convertGlobalToLocalLimit(userID, i.limits.MaxGlobalExemplarsPerUser(userID))
 	oooTW := i.limits.OutOfOrderTimeWindow(userID)
-	// Create a new user database
-	db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{
+	tsdbOptions := &tsdb.Options{
 		RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(),
 		MinBlockDuration:  blockRanges[0],
 		MaxBlockDuration:  blockRanges[len(blockRanges)-1],
@@ -1738,7 +1744,12 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
 		HeadPostingsForMatchersCacheSize:  i.cfg.BlocksStorageConfig.TSDB.HeadPostingsForMatchersCacheSize,
 		HeadPostingsForMatchersCacheForce: i.cfg.BlocksStorageConfig.TSDB.HeadPostingsForMatchersCacheForce,
 		EnableNativeHistograms:            i.limits.NativeHistogramsIngestionEnabled(userID),
-	}, nil)
+	}
+	for _, option := range additionalTsdbOptions {
+		option(tsdbOptions)
+	}
+	// Create a new user database
+	db, err := tsdb.Open(udir, userLogger, tsdbPromReg, tsdbOptions, nil)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir)
 	}
@@ -1833,13 +1844,45 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 	queue := make(chan string)
 	group, groupCtx := errgroup.WithContext(ctx)
 
+	userIDs, err := i.getAllUsersWithTSDB()
+	if err != nil {
+		level.Error(i.logger).Log("msg", "error while finding existing TSDBs", "err", err)
+		return err
+	}
+
+	if len(userIDs) == 0 {
+		return nil
+	}
+
+	var concurrentOpenTSDBCount = i.cfg.BlocksStorageConfig.TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
+	var walReplayConcurrency = 0
+	// If TSDBConfig.WALReplayConcurrency is set to a positive value, we honor it and ignore the
+	// value of TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, since the latter is deprecated.
+	// If TSDBConfig.WALReplayConcurrency is 0, it is ignored, and TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
+	// determines how many TSDBs are opened concurrently.
+	if i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency > 0 {
+		if len(userIDs) <= maxTSDBOpenWithoutConcurrency {
+			concurrentOpenTSDBCount = 1
+			walReplayConcurrency = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
+		} else {
+			concurrentOpenTSDBCount = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
+			walReplayConcurrency = 1
+		}
+	}
+	walReplayConcurrencyOption := func(tsdbOptions *tsdb.Options) {
+		tsdbOptions.WALReplayConcurrency = walReplayConcurrency
+	}
+
 	// Create a pool of workers which will open existing TSDBs.
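+	// For example, with -blocks-storage.tsdb.wal-replay-concurrency=4, up to 10 TSDBs found
+	// on disk are opened one at a time, each replaying its WAL on 4 CPUs, while 11 or more
+	// TSDBs are opened 4 at a time, each replaying its WAL on a single CPU.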
-	for n := 0; n < i.cfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup; n++ {
+	for n := 0; n < concurrentOpenTSDBCount; n++ {
 		group.Go(func() error {
 			for userID := range queue {
 				startTime := time.Now()
-				db, err := i.createTSDB(userID)
+				db, err := i.createTSDB(userID, walReplayConcurrencyOption)
 				if err != nil {
 					level.Error(i.logger).Log("msg", "unable to open TSDB", "err", err, "user", userID)
 					return errors.Wrapf(err, "unable to open TSDB for user %s", userID)
@@ -1858,46 +1901,11 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 		})
 	}
 
-	// Spawn a goroutine to find all users with a TSDB on the filesystem.
+	// Spawn a goroutine to enqueue all users with a TSDB found on the filesystem.
 	group.Go(func() error {
-		// Close the queue once filesystem walking is done.
 		defer close(queue)
-
-		walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error {
-			if err != nil {
-				// If the root directory doesn't exist, we're OK (not needed to be created upfront).
-				if os.IsNotExist(err) && path == i.cfg.BlocksStorageConfig.TSDB.Dir {
-					return filepath.SkipDir
-				}
-
-				level.Error(i.logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err)
-				return errors.Wrapf(err, "an error occurred while iterating the filesystem storing TSDBs at %s", path)
-			}
-
-			// Skip root dir and all other files
-			if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() {
-				return nil
-			}
-
-			// Top level directories are assumed to be user TSDBs
-			userID := info.Name()
-			f, err := os.Open(path)
-			if err != nil {
-				level.Error(i.logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path)
-				return errors.Wrapf(err, "unable to open TSDB dir %s for user %s", path, userID)
-			}
-			defer f.Close()
-
-			// If the dir is empty skip it
-			if _, err := f.Readdirnames(1); err != nil {
-				if errors.Is(err, io.EOF) {
-					return filepath.SkipDir
-				}
-
-				level.Error(i.logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path)
-				return errors.Wrapf(err, "unable to read TSDB dir %s for user %s", path, userID)
-			}
-
+		for userID := range userIDs {
 			// Enqueue the user to be processed.
 			select {
 			case queue <- userID:
@@ -1906,16 +1914,12 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 				// Interrupt in case a failure occurred in another goroutine.
 				return nil
 			}
-
-			// Don't descend into subdirectories.
-			return filepath.SkipDir
-		})
-
-		return errors.Wrapf(walkErr, "unable to walk directory %s containing existing TSDBs", i.cfg.BlocksStorageConfig.TSDB.Dir)
+		}
+		return nil
 	})
 
 	// Wait for all workers to complete.
-	err := group.Wait()
+	err = group.Wait()
 	if err != nil {
 		level.Error(i.logger).Log("msg", "error while opening existing TSDBs", "err", err)
 		return err
@@ -1928,6 +1932,55 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 	return nil
 }
 
+// getAllUsersWithTSDB finds all users with a TSDB on the filesystem.
+func (i *Ingester) getAllUsersWithTSDB() (map[string]struct{}, error) {
+	userIDs := make(map[string]struct{})
+	walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			// If the root directory doesn't exist, we're OK (not needed to be created upfront).
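+			// For example, a brand new ingester that has not created any TSDB yet has no such directory.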
+			if os.IsNotExist(err) && path == i.cfg.BlocksStorageConfig.TSDB.Dir {
+				return filepath.SkipDir
+			}
+
+			level.Error(i.logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err)
+			return errors.Wrapf(err, "an error occurred while iterating the filesystem storing TSDBs at %s", path)
+		}
+
+		// Skip root dir and all other files
+		if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() {
+			return nil
+		}
+
+		// Top level directories are assumed to be user TSDBs
+		userID := info.Name()
+		f, err := os.Open(path)
+		if err != nil {
+			level.Error(i.logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path)
+			return errors.Wrapf(err, "unable to open TSDB dir %s for user %s", path, userID)
+		}
+		defer f.Close()
+
+		// If the dir is empty skip it
+		if _, err := f.Readdirnames(1); err != nil {
+			if errors.Is(err, io.EOF) {
+				return filepath.SkipDir
+			}
+
+			level.Error(i.logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path)
+			return errors.Wrapf(err, "unable to read TSDB dir %s for user %s", path, userID)
+		}
+
+		// Save userID.
+		userIDs[userID] = struct{}{}
+
+		// Don't descend into subdirectories.
+		return filepath.SkipDir
+	})
+
+	return userIDs, errors.Wrapf(walkErr, "unable to walk directory %s containing existing TSDBs", i.cfg.BlocksStorageConfig.TSDB.Dir)
+}
+
 // getOldestUnshippedBlockMetric returns the unix timestamp of the oldest unshipped block or
 // 0 if all blocks have been shipped.
 func (i *Ingester) getOldestUnshippedBlockMetric() float64 {
@@ -2195,7 +2248,7 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes
 	userDB.casState(closing, closed)
 
 	// Only remove user from TSDBState when everything is cleaned up
-	// This will prevent concurrency problems when cortex are trying to open new TSDB - Ie: New request for a given tenant
+	// This will prevent concurrency problems when cortex is trying to open a new TSDB - i.e. a new request for a given tenant
 	// came in - while closing the tsdb for the same tenant.
 	// If this happens now, the request will get reject as the push will not be able to acquire the lock as the tsdb will be
 	// in closed state
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 1910cc8d74b..dfabf386908 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -20,6 +20,7 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
+	"reflect"
 	"sort"
 	"strconv"
 	"strings"
@@ -40,6 +41,7 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"github.com/prometheus/prometheus/tsdb/tsdbutil"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
@@ -424,7 +426,7 @@ func TestIngester_Push(t *testing.T) {
 					TimeSeries: &mimirpb.TimeSeries{
 						Labels:     metricLabelAdapters,
 						Samples:    []mimirpb.Sample{{Value: 0, TimestampMs: 1575043969 - (86400 * 1000)}, {Value: 1, TimestampMs: 1575043969 - (86000 * 1000)}},
-						Histograms: []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(1575043969-(86800*1000), tsdb.GenerateTestHistogram(0))},
+						Histograms: []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(1575043969-(86800*1000), tsdbutil.GenerateTestHistogram(0))},
 					},
 				},
 			},
@@ -478,7 +480,7 @@ func TestIngester_Push(t *testing.T) {
 					TimeSeries: &mimirpb.TimeSeries{
 						Labels:     metricLabelAdapters,
 						Samples:    []mimirpb.Sample{{Value: 0, TimestampMs: 1575043969 + 1000}},
-						Histograms: []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(1575043969-(86800*1000), tsdb.GenerateTestHistogram(0))},
+						Histograms: []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(1575043969-(86800*1000), tsdbutil.GenerateTestHistogram(0))},
 					},
 				},
 			},
@@ -3146,13 +3148,14 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
 	t.Parallel()
 
 	tests := map[string]struct {
-		concurrency int
-		setup       func(*testing.T, string)
-		check       func(*testing.T, *Ingester)
-		expectedErr string
+		walReplayConcurrency                         int
+		deprecatedMaxTSDBOpeningConcurrencyOnStartup int
+		setup                                        func(*testing.T, string)
+		check                                        func(*testing.T, *Ingester)
+		expectedErr                                  string
 	}{
 		"should not load TSDB if the user directory is empty": {
-			concurrency: 10,
+			walReplayConcurrency: 10,
 			setup: func(t *testing.T, dir string) {
 				require.NoError(t, os.Mkdir(filepath.Join(dir, "user0"), 0700))
 			},
@@ -3161,14 +3164,14 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
 			},
 		},
 		"should not load any TSDB if the root directory is empty": {
-			concurrency: 10,
-			setup:       func(t *testing.T, dir string) {},
+			walReplayConcurrency: 10,
+			setup:                func(t *testing.T, dir string) {},
 			check: func(t *testing.T, i *Ingester) {
 				require.Zero(t, len(i.tsdbs))
 			},
 		},
 		"should not load any TSDB is the root directory is missing": {
-			concurrency: 10,
+			walReplayConcurrency: 10,
 			setup: func(t *testing.T, dir string) {
 				require.NoError(t,
os.Remove(dir)) }, @@ -3177,7 +3180,7 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { }, }, "should load TSDB for any non-empty user directory": { - concurrency: 10, + walReplayConcurrency: 10, setup: func(t *testing.T, dir string) { require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700)) require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700)) @@ -3190,8 +3193,8 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { require.Nil(t, i.getTSDB("user2")) }, }, - "should load all TSDBs on concurrency < number of TSDBs": { - concurrency: 2, + "should load all TSDBs on walReplayConcurrency < number of TSDBs": { + walReplayConcurrency: 2, setup: func(t *testing.T, dir string) { require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700)) require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700)) @@ -3206,10 +3209,64 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { require.NotNil(t, i.getTSDB("user2")) require.NotNil(t, i.getTSDB("user3")) require.NotNil(t, i.getTSDB("user4")) + walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0")) + require.Equal(t, 2, walReplayConcurrency) }, }, - "should fail and rollback if an error occur while loading a TSDB on concurrency > number of TSDBs": { - concurrency: 10, + "should load all TSDBs on walReplayConcurrency > number of TSDBs": { + walReplayConcurrency: 10, + setup: func(t *testing.T, dir string) { + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700)) + }, + check: func(t *testing.T, i *Ingester) { + require.Equal(t, 5, len(i.tsdbs)) + require.NotNil(t, i.getTSDB("user0")) + require.NotNil(t, i.getTSDB("user1")) + require.NotNil(t, i.getTSDB("user2")) + require.NotNil(t, i.getTSDB("user3")) + require.NotNil(t, i.getTSDB("user4")) + walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0")) + require.Equal(t, 10, walReplayConcurrency) + }, + }, + "should load all TSDBs on number of TSDBs > maxTSDBOpenWithoutConcurrency": { + walReplayConcurrency: 2, + setup: func(t *testing.T, dir string) { + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user5", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user6", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user7", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user8", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user9", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user10", "dummy"), 0700)) + }, + check: func(t *testing.T, i *Ingester) { + require.Equal(t, 11, len(i.tsdbs)) + require.NotNil(t, i.getTSDB("user0")) + require.NotNil(t, i.getTSDB("user1")) + require.NotNil(t, i.getTSDB("user2")) + require.NotNil(t, 
i.getTSDB("user3")) + require.NotNil(t, i.getTSDB("user4")) + require.NotNil(t, i.getTSDB("user5")) + require.NotNil(t, i.getTSDB("user6")) + require.NotNil(t, i.getTSDB("user7")) + require.NotNil(t, i.getTSDB("user8")) + require.NotNil(t, i.getTSDB("user9")) + require.NotNil(t, i.getTSDB("user10")) + walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0")) + require.Equal(t, 1, walReplayConcurrency) + }, + }, + "should fail and rollback if an error occur while loading a TSDB on walReplayConcurrency > number of TSDBs": { + walReplayConcurrency: 10, setup: func(t *testing.T, dir string) { // Create a fake TSDB on disk with an empty chunks head segment file (it's invalid unless // it's the last one and opening TSDB should fail). @@ -3227,8 +3284,8 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { }, expectedErr: "unable to open TSDB for user user0", }, - "should fail and rollback if an error occur while loading a TSDB on concurrency < number of TSDBs": { - concurrency: 2, + "should fail and rollback if an error occur while loading a TSDB on walReplayConcurrency < number of TSDBs": { + walReplayConcurrency: 2, setup: func(t *testing.T, dir string) { require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700)) require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700)) @@ -3252,6 +3309,27 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { }, expectedErr: "unable to open TSDB for user user2", }, + "should load all TSDBs and honor DeprecatedMaxTSDBOpeningConcurrencyOnStartup when walReplayConcurrency = 0": { + walReplayConcurrency: 0, + deprecatedMaxTSDBOpeningConcurrencyOnStartup: 2, + setup: func(t *testing.T, dir string) { + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700)) + require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700)) + }, + check: func(t *testing.T, i *Ingester) { + require.Equal(t, 5, len(i.tsdbs)) + require.NotNil(t, i.getTSDB("user0")) + require.NotNil(t, i.getTSDB("user1")) + require.NotNil(t, i.getTSDB("user2")) + require.NotNil(t, i.getTSDB("user3")) + require.NotNil(t, i.getTSDB("user4")) + walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0")) + require.NotEqual(t, 0, walReplayConcurrency) + }, + }, } for name, test := range tests { @@ -3268,7 +3346,7 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { ingesterCfg := defaultIngesterTestConfig(t) ingesterCfg.BlocksStorageConfig.TSDB.Dir = tempDir - ingesterCfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup = testData.concurrency + ingesterCfg.BlocksStorageConfig.TSDB.WALReplayConcurrency = testData.walReplayConcurrency ingesterCfg.BlocksStorageConfig.Bucket.Backend = "s3" ingesterCfg.BlocksStorageConfig.Bucket.S3.Endpoint = "localhost" @@ -3292,6 +3370,13 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) { } } +func getWALReplayConcurrencyFromTSDBHeadOptions(userTSDB *userTSDB) int { + head := reflect.ValueOf(userTSDB.db.Head()).Elem() + opts := head.FieldByName("opts").Elem() + walReplayConcurrency := opts.FieldByName("WALReplayConcurrency") + return int(walReplayConcurrency.Int()) +} + func TestIngester_shipBlocks(t *testing.T) { cfg := defaultIngesterTestConfig(t) 
cfg.BlocksStorageConfig.TSDB.ShipConcurrency = 2
@@ -6450,18 +6536,18 @@ func TestNewIngestErrMsgs(t *testing.T) {
 }
 
 func TestIngesterCanEnableIngestAndQueryNativeHistograms(t *testing.T) {
-	expectedSampleHistogram := mimirpb.FromMimirSampleToPromHistogram(mimirpb.FromFloatHistogramToSampleHistogram(tsdb.GenerateTestFloatHistogram(0)))
+	expectedSampleHistogram := mimirpb.FromMimirSampleToPromHistogram(mimirpb.FromFloatHistogramToSampleHistogram(tsdbutil.GenerateTestFloatHistogram(0)))
 
 	tests := map[string]struct {
 		sampleHistograms []mimirpb.Histogram
 		expectHistogram  *model.SampleHistogram
 	}{
 		"integer histogram": {
-			sampleHistograms: []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(1, tsdb.GenerateTestHistogram(0))},
+			sampleHistograms: []mimirpb.Histogram{mimirpb.FromHistogramToHistogramProto(1, tsdbutil.GenerateTestHistogram(0))},
 			expectHistogram:  expectedSampleHistogram,
 		},
 		"float histogram": {
-			sampleHistograms: []mimirpb.Histogram{mimirpb.FromFloatHistogramToHistogramProto(1, tsdb.GenerateTestFloatHistogram(0))},
+			sampleHistograms: []mimirpb.Histogram{mimirpb.FromFloatHistogramToHistogramProto(1, tsdbutil.GenerateTestFloatHistogram(0))},
 			expectHistogram:  expectedSampleHistogram,
 		},
 	}
diff --git a/pkg/ingester/shipper.go b/pkg/ingester/shipper.go
index 46eef1948e2..4f71b226bdf 100644
--- a/pkg/ingester/shipper.go
+++ b/pkg/ingester/shipper.go
@@ -112,7 +112,7 @@ func NewShipper(
 // Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded
 // to the object bucket once.
 //
-// It is not concurrency-safe, however it is compactor-safe (running concurrently with compactor is ok).
+// It is not concurrency-safe; however, it is compactor-safe (running concurrently with the compactor is ok).
 func (s *Shipper) Sync(ctx context.Context) (shipped int, err error) {
 	shippedBlocks, err := readShippedBlocks(s.dir)
 	if err != nil {
diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go
index db06fb43f6a..d6aff66f50a 100644
--- a/pkg/storage/tsdb/config.go
+++ b/pkg/storage/tsdb/config.go
@@ -82,7 +82,8 @@ const (
 	headPostingsForMatchersCacheSizeHelp = "Maximum number of entries in the cache for postings for matchers in the Head and OOOHead when ttl > 0."
 	headPostingsForMatchersCacheForce    = "Force the cache to be used for postings for matchers in the Head and OOOHead, even if it's not a concurrent (query-sharding) call."
-	consistencyDelayFlag = "blocks-storage.bucket-store.consistency-delay"
+	consistencyDelayFlag                   = "blocks-storage.bucket-store.consistency-delay"
+	maxTSDBOpeningConcurrencyOnStartupFlag = "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup"
 )
 
 // Validation errors
@@ -92,6 +93,7 @@ var (
 	errInvalidCompactionInterval    = errors.New("invalid TSDB compaction interval")
 	errInvalidCompactionConcurrency = errors.New("invalid TSDB compaction concurrency")
 	errInvalidWALSegmentSizeBytes   = errors.New("invalid TSDB WAL segment size bytes")
+	errInvalidWALReplayConcurrency  = errors.New("invalid TSDB WAL replay concurrency")
 	errInvalidStripeSize            = errors.New("invalid TSDB stripe size")
 	errInvalidStreamingBatchSize    = errors.New("invalid store-gateway streaming batch size")
 	errEmptyBlockranges             = errors.New("empty block ranges for TSDB")
@@ -154,7 +156,7 @@ func (cfg *BlocksStorageConfig) Validate(logger log.Logger) error {
 		return err
 	}
 
-	if err := cfg.TSDB.Validate(); err != nil {
+	if err := cfg.TSDB.Validate(logger); err != nil {
 		return err
 	}
 
@@ -178,6 +180,7 @@ type TSDBConfig struct {
 	StripeSize               int           `yaml:"stripe_size" category:"advanced"`
 	WALCompressionEnabled    bool          `yaml:"wal_compression_enabled" category:"advanced"`
 	WALSegmentSizeBytes      int           `yaml:"wal_segment_size_bytes" category:"advanced"`
+	WALReplayConcurrency     int           `yaml:"wal_replay_concurrency" category:"advanced"`
 	FlushBlocksOnShutdown    bool          `yaml:"flush_blocks_on_shutdown" category:"advanced"`
 	CloseIdleTSDBTimeout     time.Duration `yaml:"close_idle_tsdb_timeout" category:"advanced"`
 	MemorySnapshotOnShutdown bool          `yaml:"memory_snapshot_on_shutdown" category:"experimental"`
@@ -186,8 +189,8 @@ type TSDBConfig struct {
 	// Series hash cache.
 	SeriesHashCacheMaxBytes uint64 `yaml:"series_hash_cache_max_size_bytes" category:"advanced"`
 
-	// MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup.
-	MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup" category:"advanced"`
+	// DeprecatedMaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDBs during startup.
+	DeprecatedMaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup" category:"deprecated"` // Deprecated. Remove in Mimir 2.10.
 
 	// If true, user TSDBs are not closed on shutdown. Only for testing.
 	// If false (default), user TSDBs are closed to make sure all resources are released and closed properly.
@@ -221,7 +224,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 	f.DurationVar(&cfg.ShipInterval, "blocks-storage.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.")
 	f.IntVar(&cfg.ShipConcurrency, "blocks-storage.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.")
 	f.Uint64Var(&cfg.SeriesHashCacheMaxBytes, "blocks-storage.tsdb.series-hash-cache-max-size-bytes", uint64(1*units.Gibibyte), "Max size - in bytes - of the in-memory series hash cache.
The cache is shared across all tenants and it's used only when query sharding is enabled.")
-	f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup")
+	f.IntVar(&cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, maxTSDBOpeningConcurrencyOnStartupFlag, 10, "limit the number of concurrently opening TSDBs on startup")
 	f.DurationVar(&cfg.HeadCompactionInterval, "blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently the ingester checks whether the TSDB head should be compacted and, if so, triggers the compaction. Mimir applies a jitter to the first check, while subsequent checks will happen at the configured interval. Block is only created if data covers smallest block range. The configured interval must be between 0 and 15 minutes.")
 	f.IntVar(&cfg.HeadCompactionConcurrency, "blocks-storage.tsdb.head-compaction-concurrency", 1, "Maximum number of tenants concurrently compacting TSDB head into a new block")
 	f.DurationVar(&cfg.HeadCompactionIdleTimeout, "blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. Note that up to 25% jitter is added to the value to avoid ingesters compacting concurrently. 0 means disabled.")
@@ -230,6 +233,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, headStripeSizeHelp)
 	f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.")
 	f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).")
+	f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously process WAL replay. 0 means disabled.")
 	f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.")
 	f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 13*time.Hour, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.")
 	f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.")
@@ -241,13 +245,15 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 }
 
 // Validate the config.
-func (cfg *TSDBConfig) Validate() error { +func (cfg *TSDBConfig) Validate(logger log.Logger) error { if cfg.ShipInterval > 0 && cfg.ShipConcurrency <= 0 { return errInvalidShipConcurrency } - if cfg.MaxTSDBOpeningConcurrencyOnStartup <= 0 { + if cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup <= 0 { return errInvalidOpeningConcurrency + } else { + util.WarnDeprecatedConfig(maxTSDBOpeningConcurrencyOnStartupFlag, logger) } if cfg.HeadCompactionInterval <= 0 || cfg.HeadCompactionInterval > 15*time.Minute { @@ -274,6 +280,10 @@ func (cfg *TSDBConfig) Validate() error { return errInvalidWALSegmentSizeBytes } + if cfg.WALReplayConcurrency < 0 { + return errInvalidWALReplayConcurrency + } + return nil } diff --git a/pkg/storage/tsdb/config_test.go b/pkg/storage/tsdb/config_test.go index dfb7bf91e8f..f9360378ceb 100644 --- a/pkg/storage/tsdb/config_test.go +++ b/pkg/storage/tsdb/config_test.go @@ -57,7 +57,7 @@ func TestConfig_Validate(t *testing.T) { }, "should fail on invalid opening concurrency": { setup: func(cfg *BlocksStorageConfig) { - cfg.TSDB.MaxTSDBOpeningConcurrencyOnStartup = 0 + cfg.TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup = 0 }, expectedErr: errInvalidOpeningConcurrency, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index e6248360029..18694f07f7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) return credentials.NewCredentials(p) } -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. +type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. SecretAccessKey string - SessionToken string - Expiration *time.Time + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time } // Retrieve executes the 'credential_process' and returns the credentials. 
@@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) { } // Serialize and validate response - resp := &credentialProcessResponse{} + resp := &CredentialProcessResponse{} if err = json.Unmarshal(out, resp); err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New( ErrCodeProcessProviderParse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index b340448a6eb..fb403f5d97a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -2605,6 +2605,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3221,6 +3224,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -4057,12 +4063,47 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, "cassandra": service{ @@ -4918,6 +4959,76 @@ var awsPartition = partition{ }, }, }, + "cloudtrail-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "codeartifact": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -6495,6 +6606,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: 
endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -7185,6 +7299,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7206,12 +7323,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8089,6 +8212,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8110,12 +8236,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8950,6 +9082,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8965,12 +9100,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -9403,6 +9544,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9565,6 +9715,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ @@ -10921,6 +11080,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -10936,12 +11098,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11107,6 +11275,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ap-southeast-2.amazonaws.com", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11335,6 +11506,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, 
@@ -12210,6 +12384,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12495,6 +12672,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -12519,6 +12699,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -14081,6 +14264,9 @@ var awsPartition = partition{ }, "kendra": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -14175,6 +14361,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.ap-east-1.api.aws", }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{ @@ -14185,6 +14376,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.ap-northeast-3.api.aws", }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{ @@ -14206,14 +14402,14 @@ var awsPartition = partition{ Hostname: "kendra-ranking.ap-southeast-3.api.aws", }, endpointKey{ - Region: "ca-central-1", + Region: "ap-southeast-4", }: endpoint{ - Hostname: "kendra-ranking.ca-central-1.api.aws", + Hostname: "kendra-ranking.ap-southeast-4.api.aws", }, endpointKey{ - Region: "eu-central-1", + Region: "ca-central-1", }: endpoint{ - Hostname: "kendra-ranking.eu-central-1.api.aws", + Hostname: "kendra-ranking.ca-central-1.api.aws", }, endpointKey{ Region: "eu-central-2", @@ -14240,11 +14436,6 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.eu-west-1.api.aws", }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "kendra-ranking.eu-west-2.api.aws", - }, endpointKey{ Region: "eu-west-3", }: endpoint{ @@ -14482,6 +14673,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19616,6 +19810,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -25414,12 +25611,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -30161,6 +30364,21 @@ var awscnPartition = partition{ }, }, }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "sms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -31325,6 +31543,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -31333,6 +31569,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "cloudhsm": service{ @@ -31651,6 +31905,26 @@ var awsusgovPartition = partition{ }, }, }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "config": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -32270,6 +32544,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -32278,6 +32570,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "elasticfilesystem": service{ @@ -32715,21 +33025,43 @@ var awsusgovPartition = partition{ "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "glacier.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Hostname: "glacier.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, }, }, }, @@ -32800,36 +33132,38 @@ var awsusgovPartition = partition{ endpointKey{ Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + Hostname: "greengrass.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "greengrass.us-gov-east-1.amazonaws.com", + Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, + Hostname: "greengrass.us-gov-east-1.amazonaws.com", }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, }: endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, }, }, }, @@ -33028,6 +33362,21 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "inspector": service{ @@ -33070,6 +33419,16 @@ var awsusgovPartition = partition{ }, }, }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "iot": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -33298,6 +33657,24 @@ var awsusgovPartition = partition{ }, "kinesis": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{ @@ -33306,6 +33683,15 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -33314,6 +33700,15 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, 
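The hunks above add fipsVariant rows next to the base regional endpoints (with the old region-named FIPS entries kept but marked Deprecated: boxedTrue). For a sense of how such variant rows are selected at runtime, here is a small standalone sketch against this SDK's public endpoint resolver; treat the service/region inputs and the UseFIPSEndpoint option as assumptions about this SDK version's API, not as part of the patch:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
    	// Ask the default resolver for the kinesis endpoint in us-gov-east-1,
    	// opting in to FIPS; this should select the
    	// endpointKey{Region: "us-gov-east-1", Variant: fipsVariant} entry above.
    	resolved, err := endpoints.DefaultResolver().EndpointFor(
    		"kinesis", "us-gov-east-1",
    		func(o *endpoints.Options) {
    			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
    		},
    	)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(resolved.URL) // expected: https://kinesis.us-gov-east-1.amazonaws.com
    }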
"kinesisanalytics": service{ @@ -33887,20 +34282,40 @@ var awsusgovPartition = partition{ "outposts": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ Hostname: "outposts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ Hostname: "outposts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", }, }, }, @@ -34025,6 +34440,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -34033,6 +34466,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "rbin": service{ @@ -34379,6 +34830,9 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -35414,6 +35868,24 @@ var awsusgovPartition = partition{ Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ @@ -35422,6 +35894,24 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "synthetics": service{ @@ -36788,6 +37278,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "dms": 
service{
			Defaults: endpointDefaults{
				defaultKey{}: endpoint{},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index d5a26e3e773..00526729422 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.187"
+const SDKVersion = "1.44.207"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go
index 12fcef703f0..3f6e837bad7 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go
@@ -6469,12 +6469,14 @@ type GetTopicAttributesOutput struct {
 	//
 	//    * Policy – The JSON serialization of the topic's access control policy.
 	//
-	//    * SignatureVersion – The version of the Amazon SNS signature used for
-	//    the topic. By default, SignatureVersion is set to 1. The signature is
-	//    a Base64-encoded SHA1withRSA signature. When you set SignatureVersion
-	//    to 2. Amazon SNS uses a Base64-encoded SHA256withRSA signature. If the
-	//    API response does not include the SignatureVersion attribute, it means
-	//    that the SignatureVersion for the topic has value 1.
+	//    * SignatureVersion – The signature version corresponds to the hashing
+	//    algorithm used while creating the signature of the notifications, subscription
+	//    confirmations, or unsubscribe confirmation messages sent by Amazon SNS.
+	//    By default, SignatureVersion is set to 1. The signature is a Base64-encoded
+	//    SHA1withRSA signature. When you set SignatureVersion to 2, Amazon SNS
+	//    uses a Base64-encoded SHA256withRSA signature. If the API response does
+	//    not include the SignatureVersion attribute, it means that the SignatureVersion
+	//    for the topic has value 1.
 	//
 	//    * SubscriptionsConfirmed – The number of confirmed subscriptions for
 	//    the topic.
@@ -8926,6 +8928,10 @@ type SetTopicAttributesInput struct {
 	// The following lists the names, descriptions, and values of the special request
 	// parameters that the SetTopicAttributes action uses:
 	//
+	//    * ApplicationSuccessFeedbackRoleArn – Indicates successful message delivery
+	//    status for an Amazon SNS topic that is subscribed to a platform application
+	//    endpoint.
+	//
 	//    * DeliveryPolicy – The policy that defines how Amazon SNS retries failed
 	//    deliveries to HTTP/S endpoints.
 	//
@@ -8941,6 +8947,59 @@ type SetTopicAttributesInput struct {
 	//    if the sampled flag in the tracing header is true. This is only supported
 	//    on standard topics.
 	//
+	//    * HTTP HTTPSuccessFeedbackRoleArn – Indicates successful message delivery
+	//    status for an Amazon SNS topic that is subscribed to an HTTP endpoint.
+	//    HTTPSuccessFeedbackSampleRate – Indicates percentage of successful messages
+	//    to sample for an Amazon SNS topic that is subscribed to an HTTP endpoint.
+	//    HTTPFailureFeedbackRoleArn – Indicates failed message delivery status
+	//    for an Amazon SNS topic that is subscribed to an HTTP endpoint.
+	//
+	//    * Amazon Kinesis Data Firehose FirehoseSuccessFeedbackRoleArn – Indicates
+	//    successful message delivery status for an Amazon SNS topic that is subscribed
+	//    to an Amazon Kinesis Data Firehose endpoint. FirehoseSuccessFeedbackSampleRate
+	//    – Indicates percentage of successful messages to sample for an Amazon
+	//    SNS topic that is subscribed to an Amazon Kinesis Data Firehose endpoint.
+	//    FirehoseFailureFeedbackRoleArn – Indicates failed message delivery status
+	//    for an Amazon SNS topic that is subscribed to an Amazon Kinesis Data Firehose
+	//    endpoint.
+	//
+	//    * Lambda LambdaSuccessFeedbackRoleArn – Indicates successful message
+	//    delivery status for an Amazon SNS topic that is subscribed to a Lambda
+	//    endpoint. LambdaSuccessFeedbackSampleRate – Indicates percentage of
+	//    successful messages to sample for an Amazon SNS topic that is subscribed
+	//    to a Lambda endpoint. LambdaFailureFeedbackRoleArn – Indicates failed
+	//    message delivery status for an Amazon SNS topic that is subscribed to
+	//    a Lambda endpoint.
+	//
+	//    * Platform application endpoint ApplicationSuccessFeedbackRoleArn –
+	//    Indicates successful message delivery status for an Amazon SNS topic that
+	//    is subscribed to an Amazon Web Services application endpoint. ApplicationSuccessFeedbackSampleRate
+	//    – Indicates percentage of successful messages to sample for an Amazon
+	//    SNS topic that is subscribed to an Amazon Web Services application endpoint.
+	//    ApplicationFailureFeedbackRoleArn – Indicates failed message delivery
+	//    status for an Amazon SNS topic that is subscribed to an Amazon Web Services
+	//    application endpoint. In addition to being able to configure topic attributes
+	//    for message delivery status of notification messages sent to Amazon SNS
+	//    application endpoints, you can also configure application attributes for
+	//    the delivery status of push notification messages sent to push notification
+	//    services. For more information, see Using Amazon SNS Application
+	//    Attributes for Message Delivery Status (https://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html).
+	//
+	//    * Amazon SQS SQSSuccessFeedbackRoleArn – Indicates successful message
+	//    delivery status for an Amazon SNS topic that is subscribed to an Amazon
+	//    SQS endpoint. SQSSuccessFeedbackSampleRate – Indicates percentage of
+	//    successful messages to sample for an Amazon SNS topic that is subscribed
+	//    to an Amazon SQS endpoint. SQSFailureFeedbackRoleArn – Indicates failed
+	//    message delivery status for an Amazon SNS topic that is subscribed to
+	//    an Amazon SQS endpoint.
+	//
+	//    The SuccessFeedbackRoleArn and FailureFeedbackRoleArn
+	//    attributes are used to give Amazon SNS write access to use CloudWatch Logs
+	//    on your behalf. The SuccessFeedbackSampleRate attribute is for
+	//    specifying the sample rate percentage (0-100) of successfully delivered messages.
+	//    After you configure the FailureFeedbackRoleArn attribute,
+	//    all failed message deliveries generate CloudWatch Logs.
+	//
 	// The following attribute applies only to server-side-encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html):
 	//
 	//    * KmsMasterKeyId – The ID of an Amazon Web Services managed customer
@@ -8952,6 +9011,7 @@ type SetTopicAttributesInput struct {
 	//    * SignatureVersion – The signature version corresponds to the hashing
 	//    algorithm used while creating the signature of the notifications, subscription
 	//    confirmations, or unsubscribe confirmation messages sent by Amazon SNS.
+	//    By default, SignatureVersion is set to 1.
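Since the SignatureVersion attribute documented above is set per topic through SetTopicAttributes, a minimal usage sketch with this SDK may help; the session setup and the topic ARN are illustrative placeholders, not part of the vendored change:

    package main

    import (
    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := sns.New(sess)
    	// Switch the topic to SignatureVersion 2 (Base64-encoded SHA256withRSA).
    	_, err := svc.SetTopicAttributes(&sns.SetTopicAttributesInput{
    		TopicArn:       aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
    		AttributeName:  aws.String("SignatureVersion"),
    		AttributeValue: aws.String("2"),
    	})
    	if err != nil {
    		panic(err)
    	}
    }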
// // The following attribute applies only to FIFO topics (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html): // diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 8bc4bf34a09..a29c98eed22 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -216,12 +216,13 @@ var ( // Config is the top-level configuration for Prometheus's config files. type Config struct { - GlobalConfig GlobalConfig `yaml:"global"` - AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` - RuleFiles []string `yaml:"rule_files,omitempty"` - ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` - StorageConfig StorageConfig `yaml:"storage,omitempty"` - TracingConfig TracingConfig `yaml:"tracing,omitempty"` + GlobalConfig GlobalConfig `yaml:"global"` + AlertingConfig AlertingConfig `yaml:"alerting,omitempty"` + RuleFiles []string `yaml:"rule_files,omitempty"` + ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"` + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` + StorageConfig StorageConfig `yaml:"storage,omitempty"` + TracingConfig TracingConfig `yaml:"tracing,omitempty"` RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` @@ -235,6 +236,9 @@ func (c *Config) SetDirectory(dir string) { for i, file := range c.RuleFiles { c.RuleFiles[i] = config.JoinDir(dir, file) } + for i, file := range c.ScrapeConfigFiles { + c.ScrapeConfigFiles[i] = config.JoinDir(dir, file) + } for _, c := range c.ScrapeConfigs { c.SetDirectory(dir) } @@ -254,6 +258,58 @@ func (c Config) String() string { return string(b) } +// ScrapeConfigs returns the scrape configurations. +func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { + scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) + + jobNames := map[string]string{} + for i, scfg := range c.ScrapeConfigs { + // We do these checks for library users that would not call Validate in + // Unmarshal. + if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + return nil, err + } + + if _, ok := jobNames[scfg.JobName]; ok { + return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) + } + jobNames[scfg.JobName] = "main config file" + scfgs[i] = scfg + } + for _, pat := range c.ScrapeConfigFiles { + fs, err := filepath.Glob(pat) + if err != nil { + // The only error can be a bad pattern. + return nil, fmt.Errorf("error retrieving scrape config files for %q: %w", pat, err) + } + for _, filename := range fs { + cfg := ScrapeConfigs{} + content, err := os.ReadFile(filename) + if err != nil { + return nil, fileErr(filename, err) + } + err = yaml.UnmarshalStrict(content, &cfg) + if err != nil { + return nil, fileErr(filename, err) + } + for _, scfg := range cfg.ScrapeConfigs { + if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + return nil, fileErr(filename, err) + } + + if f, ok := jobNames[scfg.JobName]; ok { + return nil, fileErr(filename, fmt.Errorf("found multiple scrape configs with job name %q, first found in %s", scfg.JobName, f)) + } + jobNames[scfg.JobName] = fmt.Sprintf("%q", filePath(filename)) + + scfg.SetDirectory(filepath.Dir(filename)) + scfgs = append(scfgs, scfg) + } + } + } + return scfgs, nil +} + // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultConfig @@ -276,26 +332,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { return fmt.Errorf("invalid rule file path %q", rf) } } + + for _, sf := range c.ScrapeConfigFiles { + if !patRulePath.MatchString(sf) { + return fmt.Errorf("invalid scrape config file path %q", sf) + } + } + // Do global overrides and validate unique names. jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { - if scfg == nil { - return errors.New("empty or null scrape config section") - } - // First set the correct scrape interval, then check that the timeout - // (inferred or explicit) is not greater than that. - if scfg.ScrapeInterval == 0 { - scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval - } - if scfg.ScrapeTimeout > scfg.ScrapeInterval { - return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) - } - if scfg.ScrapeTimeout == 0 { - if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { - scfg.ScrapeTimeout = scfg.ScrapeInterval - } else { - scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout - } + if err := scfg.Validate(c.GlobalConfig.ScrapeInterval, c.GlobalConfig.ScrapeTimeout); err != nil { + return err } if _, ok := jobNames[scfg.JobName]; ok { @@ -401,6 +449,10 @@ func (c *GlobalConfig) isZero() bool { c.QueryLogFile == "" } +type ScrapeConfigs struct { + ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"` +} + // ScrapeConfig configures a scraping unit for Prometheus. type ScrapeConfig struct { // The job name to which the job label is set by default. @@ -494,6 +546,28 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } +func (c *ScrapeConfig) Validate(defaultInterval, defaultTimeout model.Duration) error { + if c == nil { + return errors.New("empty or null scrape config section") + } + // First set the correct scrape interval, then check that the timeout + // (inferred or explicit) is not greater than that. + if c.ScrapeInterval == 0 { + c.ScrapeInterval = defaultInterval + } + if c.ScrapeTimeout > c.ScrapeInterval { + return fmt.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", c.JobName) + } + if c.ScrapeTimeout == 0 { + if defaultTimeout > c.ScrapeInterval { + c.ScrapeTimeout = c.ScrapeInterval + } else { + c.ScrapeTimeout = defaultTimeout + } + } + return nil +} + // MarshalYAML implements the yaml.Marshaler interface. func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { return discovery.MarshalYAMLWithInlineConfigs(c) @@ -936,3 +1010,15 @@ func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) erro // Thus we just do its validation here. return c.HTTPClientConfig.Validate() } + +func filePath(filename string) string { + absPath, err := filepath.Abs(filename) + if err != nil { + return filename + } + return absPath +} + +func fileErr(filename string, err error) error { + return fmt.Errorf("%q: %w", filePath(filename), err) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 36a0e6cb358..056bc637403 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -11,16 +11,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build !stringlabels + package labels import ( "bytes" "encoding/json" - "sort" "strconv" "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" + "golang.org/x/exp/slices" ) // Well-known label names used by Prometheus components. @@ -358,7 +360,7 @@ func EmptyLabels() Labels { func New(ls ...Label) Labels { set := make(Labels, 0, len(ls)) set = append(set, ls...) - sort.Sort(set) + slices.SortFunc(set, func(a, b Label) bool { return a.Name < b.Name }) return set } @@ -382,7 +384,7 @@ func FromStrings(ss ...string) Labels { res = append(res, Label{Name: ss[i], Value: ss[i+1]}) } - sort.Sort(res) + slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) return res } @@ -562,7 +564,7 @@ Outer: } if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. res = append(res, b.add...) - sort.Sort(res) + slices.SortFunc(res, func(a, b Label) bool { return a.Name < b.Name }) } return res } @@ -589,7 +591,7 @@ func (b *ScratchBuilder) Add(name, value string) { // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { - sort.Sort(b.add) + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) } // Asssign is for when you already have a Labels which you want this ScratchBuilder to return. diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go new file mode 100644 index 00000000000..815e263ba56 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_string.go @@ -0,0 +1,788 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build stringlabels + +package labels + +import ( + "bytes" + "encoding/json" + "reflect" + "strconv" + "unsafe" + + "github.com/cespare/xxhash/v2" + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" +) + +// Well-known label names used by Prometheus components. +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" +) + +var seps = []byte{'\xff'} + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +// Labels is implemented by a single flat string holding name/value pairs. +// Each name and value is preceded by its length in varint encoding. +// Names are in order. +type Labels struct { + data string +} + +type labelSlice []Label + +func (ls labelSlice) Len() int { return len(ls) } +func (ls labelSlice) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } +func (ls labelSlice) Less(i, j int) bool { return ls[i].Name < ls[j].Name } + +func decodeSize(data string, index int) (int, int) { + var size int + for shift := uint(0); ; shift += 7 { + // Just panic if we go of the end of data, since all Labels strings are constructed internally and + // malformed data indicates a bug, or memory corruption. 
+ b := data[index] + index++ + size |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + return size, index +} + +func decodeString(data string, index int) (string, int) { + var size int + size, index = decodeSize(data, index) + return data[index : index+size], index + size +} + +func (ls Labels) String() string { + var b bytes.Buffer + + b.WriteByte('{') + for i := 0; i < len(ls.data); { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + b.WriteString(name) + b.WriteByte('=') + b.WriteString(strconv.Quote(value)) + } + b.WriteByte('}') + return b.String() +} + +// Bytes returns ls as a byte slice. +// It uses non-printing characters and so should not be used for printing. +func (ls Labels) Bytes(buf []byte) []byte { + if cap(buf) < len(ls.data) { + buf = make([]byte, len(ls.data)) + } else { + buf = buf[:len(ls.data)] + } + copy(buf, ls.data) + return buf +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. +func (ls Labels) IsZero() bool { + return len(ls.data) == 0 +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +// TODO: This is only used in printing an error message +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + b := NewBuilder(ls) + if on { + b.Keep(names...) + } else { + b.Del(MetricName) + b.Del(names...) + } + return b.Labels(EmptyLabels()) +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + return xxhash.Sum64(yoloBytes(ls.data)) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. 
+func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + buf := append([]byte{}, ls.data...) + return Labels{data: yoloString(buf)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + if lName == name { + return lValue + } + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + for i := 0; i < len(ls.data); { + var lName string + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == name { + return true + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + var lName, prevName string + for i := 0; i < len(ls.data); { + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == prevName { + return lName, true + } + prevName = lName + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for pos := 0; pos < len(ls.data); { + _, newPos := decodeString(ls.data, pos) + lValue, newPos := decodeString(ls.data, newPos) + if lValue != "" { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. 
+ buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeString(ls.data, pos) + lValue, newPos = decodeString(ls.data, newPos) + if lValue != "" { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{data: yoloString(buf)} + } + return ls +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + return ls.data == o.data +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string, len(ls.data)/10) + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + m[lName] = lValue + } + return m +} + +// EmptyLabels returns an empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func yoloBytes(s string) (b []byte) { + *(*string)(unsafe.Pointer(&b)) = s + (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) + return +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + size := labelsSize(ls) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(ls, buf) + return Labels{data: yoloString(buf)} +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + slices.SortFunc(ls, func(a, b Label) bool { return a.Name < b.Name }) + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +// TODO: replace with Less function - Compare is never needed. +// TODO: just compare the underlying strings when we don't need alphanumeric sorting. +func Compare(a, b Labels) int { + l := len(a.data) + if len(b.data) < l { + l = len(b.data) + } + + ia, ib := 0, 0 + for ia < l { + var aName, bName string + aName, ia = decodeString(a.data, ia) + bName, ib = decodeString(b.data, ib) + if aName != bName { + if aName < bName { + return -1 + } + return 1 + } + var aValue, bValue string + aValue, ia = decodeString(a.data, ia) + bValue, ib = decodeString(b.data, ib) + if aValue != bValue { + if aValue < bValue { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. 
+ return len(a.data) - len(b.data) +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + ls.data = b.data // strings are immutable +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + i += size + size, i = decodeSize(ls.data, i) + i += size + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + ls.data = intern(ls.data) +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + release(ls.data) +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + for i := 0; i < len(base.data); { + var lName, lValue string + lName, i = decodeString(base.data, i) + lValue, i = decodeString(base.data, i) + if lValue == "" { + b.del = append(b.del, lName) + } + } +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { +Outer: + for i := 0; i < len(b.base.data); { + var lName string + lName, i = decodeString(b.base.data, i) + _, i = decodeString(b.base.data, i) + for _, n := range ns { + if lName == n { + continue Outer + } + } + b.del = append(b.del, lName) + } + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +// Labels returns the labels from the builder, adding them to res if non-nil. +// Argument res can be the same as b.base, if caller wants to overwrite that slice. 
+// If no modifications were made, the original labels are returned. +func (b *Builder) Labels(res Labels) Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) + slices.Sort(b.del) + a, d := 0, 0 + + bufSize := len(b.base.data) + labelsSize(b.add) + buf := make([]byte, 0, bufSize) // TODO: see if we can re-use the buffer from res. + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.data, pos) + _, pos = decodeString(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(buf, &b.add[a]) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) + } + // We have come to the end of the base set; add any remaining labels. + for ; a < len(b.add); a++ { + buf = appendLabelTo(buf, &b.add[a]) + } + return Labels{data: yoloString(buf)} +} + +func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int { + i := len(data) + for index := len(lbls) - 1; index >= 0; index-- { + size := marshalLabelToSizedBuffer(&lbls[index], data[:i]) + i -= size + } + return len(data) - i +} + +func marshalLabelToSizedBuffer(m *Label, data []byte) int { + i := len(data) + i -= len(m.Value) + copy(data[i:], m.Value) + i = encodeSize(data, i, len(m.Value)) + i -= len(m.Name) + copy(data[i:], m.Name) + i = encodeSize(data, i, len(m.Name)) + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<7 { + return 1 + } + if x >= 1<<56 { + return 9 + } + if x >= 1<<28 { + x >>= 28 + n = 4 + } + if x >= 1<<14 { + x >>= 14 + n += 2 + } + if x >= 1<<7 { + n++ + } + return n + 1 +} + +func encodeVarint(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a size is less than 128 +func encodeSize(data []byte, offset, v int) int { + if v < 1<<7 { + offset-- + data[offset] = uint8(v) + return offset + } + return encodeVarint(data, offset, uint64(v)) +} + +func labelsSize(lbls []Label) (n int) { + // we just encode name/value/name/value, without any extra tags or length bytes + for _, e := range lbls { + n += labelSize(&e) + } + return n +} + +func labelSize(m *Label) (n int) { + // strings are encoded as length followed by contents. + l := len(m.Name) + n += l + sizeVarint(uint64(l)) + l = len(m.Value) + n += l + sizeVarint(uint64(l)) + return n +} + +func appendLabelTo(buf []byte, m *Label) []byte { + size := labelSize(m) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + marshalLabelToSizedBuffer(m, buf) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. 
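The flat-string layout used throughout this file stores the whole label set as one string: each name and value is a varint length followed by its bytes, with names in sorted order. A tiny stdlib-only sketch of that byte layout (separate from the vendored code, which writes the same bytes back-to-front into a pre-sized buffer via encodeSize and marshalLabelToSizedBuffer):

    package main

    import "fmt"

    // appendLengthPrefixed appends len(s) as a varint (low 7 bits first,
    // 0x80 continuation bit) followed by the bytes of s, mirroring what
    // decodeSize/decodeString above read back.
    func appendLengthPrefixed(buf []byte, s string) []byte {
    	v := uint64(len(s))
    	for v >= 1<<7 {
    		buf = append(buf, byte(v&0x7f|0x80))
    		v >>= 7
    	}
    	buf = append(buf, byte(v))
    	return append(buf, s...)
    }

    func main() {
    	// {instance="localhost:9090", job="api"} becomes one flat byte string,
    	// names already in sorted order.
    	var buf []byte
    	for _, s := range []string{"instance", "localhost:9090", "job", "api"} {
    		buf = appendLengthPrefixed(buf, s)
    	}
    	fmt.Printf("%q\n", buf) // "\binstance\x0elocalhost:9090\x03job\x03api"
    }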
+type ScratchBuilder struct { + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) bool { return a.Name < b.Name }) +} + +// Asssign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(l Labels) { + b.output = l +} + +// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + if b.output.IsEmpty() { + size := labelsSize(b.add) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(b.add, buf) + b.output = Labels{data: yoloString(buf)} + } + return b.output +} + +// Write the newly-built Labels out to ls, reusing an internal buffer. +// Callers must ensure that there are no other references to ls. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + size := labelsSize(b.add) + if size <= cap(b.overwriteBuffer) { + b.overwriteBuffer = b.overwriteBuffer[:size] + } else { + b.overwriteBuffer = make([]byte, size) + } + marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer) + ls.data = yoloString(b.overwriteBuffer) +} diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 0cc6eeeb7ed..eb05817c0c5 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -15,6 +15,7 @@ package relabel import ( "crypto/md5" + "encoding/binary" "fmt" "strings" @@ -268,7 +269,9 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.La case Uppercase: lb.Set(cfg.TargetLabel, strings.ToUpper(val)) case HashMod: - mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus + hash := md5.Sum([]byte(val)) + // Use only the last 8 bytes of the hash to give the same result as earlier versions of this code. + mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) case LabelMap: lset.Range(func(l labels.Label) { @@ -295,15 +298,3 @@ func relabel(lset labels.Labels, cfg *Config, lb *labels.Builder) (ret labels.La return lb.Labels(lset), true } - -// sum64 sums the md5 hash to an uint64. 
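The relabel.go change above replaces the hand-rolled fold (the sum64 helper deleted just below) with binary.BigEndian.Uint64(hash[8:]). The two agree because, in Go, shifting a uint64 by 64 or more bits yields zero, so the first eight MD5 bytes never contributed to sum64's result; only the big-endian value of the last eight bytes survives. A quick standalone check of that equivalence (an editorial sketch, not part of the patch):

    package main

    import (
    	"crypto/md5"
    	"encoding/binary"
    	"fmt"
    )

    // sum64Old reproduces the deleted helper: bytes 0..7 are shifted by 64 or
    // more bits and therefore vanish, leaving the big-endian tail of the hash.
    func sum64Old(hash [md5.Size]byte) uint64 {
    	var s uint64
    	for i, b := range hash {
    		shift := uint64((md5.Size - i - 1) * 8)
    		s |= uint64(b) << shift
    	}
    	return s
    }

    func main() {
    	hash := md5.Sum([]byte("some-label-value"))
    	fmt.Println(sum64Old(hash) == binary.BigEndian.Uint64(hash[8:])) // true
    }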
-func sum64(hash [md5.Size]byte) uint64 { - var s uint64 - - for i, b := range hash { - shift := uint64((md5.Size - i - 1) * 8) - - s |= uint64(b) << shift - } - return s -} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 15a95a95924..c17d40020a6 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -17,7 +17,6 @@ package textparse import ( - "bytes" "errors" "fmt" "io" @@ -31,8 +30,6 @@ import ( "github.com/prometheus/prometheus/model/value" ) -var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")} - type openMetricsLexer struct { b []byte i int @@ -46,13 +43,6 @@ func (l *openMetricsLexer) buf() []byte { return l.b[l.start:l.i] } -func (l *openMetricsLexer) cur() byte { - if l.i < len(l.b) { - return l.b[l.i] - } - return byte(' ') -} - // next advances the openMetricsLexer to the next character. func (l *openMetricsLexer) next() byte { l.i++ @@ -223,6 +213,14 @@ func (p *OpenMetricsParser) nextToken() token { return tok } +func (p *OpenMetricsParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) +} + // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. func (p *OpenMetricsParser) Next() (Entry, error) { @@ -248,7 +246,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { case tMName: p.offsets = append(p.offsets, p.l.start, p.l.i) default: - return EntryInvalid, parseError("expected metric name after "+t.String(), t2) + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } switch t2 := p.nextToken(); t2 { case tText: @@ -284,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, errors.New("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) } } switch t { @@ -297,7 +295,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { u := yoloString(p.text) if len(u) > 0 { if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { - return EntryInvalid, fmt.Errorf("unit not a suffix of metric %q", m) + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m) } } return EntryUnit, nil @@ -336,10 +334,10 @@ func (p *OpenMetricsParser) Next() (Entry, error) { var ts float64 // A float is enough to hold what we need for millisecond resolution. 
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, errors.New("invalid timestamp") + return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { @@ -349,26 +347,20 @@ func (p *OpenMetricsParser) Next() (Entry, error) { return EntryInvalid, err } default: - return EntryInvalid, parseError("expected next entry after timestamp", t3) + return EntryInvalid, p.parseError("expected next entry after timestamp", t3) } default: - return EntryInvalid, parseError("expected timestamp or # symbol", t2) + return EntryInvalid, p.parseError("expected timestamp or # symbol", t2) } return EntrySeries, nil default: - err = fmt.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) + err = p.parseError("expected a valid start token", t) } return EntryInvalid, err } func (p *OpenMetricsParser) parseComment() error { - // Validate the name of the metric. It must have _total or _bucket as - // suffix for exemplars to be supported. - if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil { - return err - } - var err error // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets) @@ -395,19 +387,19 @@ func (p *OpenMetricsParser) parseComment() error { var ts float64 // A float is enough to hold what we need for millisecond resolution. if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return err + return fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return errors.New("invalid exemplar timestamp") + return fmt.Errorf("invalid exemplar timestamp %f", ts) } p.exemplarTs = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: default: - return parseError("expected next entry after exemplar timestamp", t3) + return p.parseError("expected next entry after exemplar timestamp", t3) } default: - return parseError("expected timestamp or comment", t2) + return p.parseError("expected timestamp or comment", t2) } return nil } @@ -421,21 +413,21 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { return offsets, nil case tComma: if first { - return nil, parseError("expected label name or left brace", t) + return nil, p.parseError("expected label name or left brace", t) } t = p.nextToken() if t != tLName { - return nil, parseError("expected label name", t) + return nil, p.parseError("expected label name", t) } case tLName: if !first { - return nil, parseError("expected comma", t) + return nil, p.parseError("expected comma", t) } default: if first { - return nil, parseError("expected label name or left brace", t) + return nil, p.parseError("expected label name or left brace", t) } - return nil, parseError("expected comma or left brace", t) + return nil, p.parseError("expected comma or left brace", t) } first = false @@ -444,13 +436,13 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { offsets = append(offsets, p.l.start, p.l.i) if t := p.nextToken(); t != tEqual { - return nil, parseError("expected equal", t) + return nil, p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { - return nil, parseError("expected label value", t) + return nil, p.parseError("expected label value", t) } if !utf8.Valid(p.l.buf()) { - return nil, errors.New("invalid UTF-8 label value") + return nil, 
fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) } // The openMetricsLexer ensures the value string is quoted. Strip first @@ -461,11 +453,11 @@ func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) { func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { if t != tValue { - return 0, parseError(fmt.Sprintf("expected value after %v", after), t) + return 0, p.parseError(fmt.Sprintf("expected value after %v", after), t) } val, err := parseFloat(yoloString(p.l.buf()[1:])) if err != nil { - return 0, err + return 0, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. if math.IsNaN(p.exemplarVal) { @@ -473,12 +465,3 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } - -func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error { - for _, suffix := range allowedSuffixes { - if bytes.HasSuffix(name, suffix) { - return nil - } - } - return fmt.Errorf("metric name %v does not support exemplars", string(name)) -} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index b0c963392d2..2c981f050e4 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -254,8 +254,12 @@ func (p *PromParser) nextToken() token { } } -func parseError(exp string, got token) error { - return fmt.Errorf("%s, got %q", exp, got) +func (p *PromParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) } // Next advances the parser to the next sample. It returns false if no @@ -278,7 +282,7 @@ func (p *PromParser) Next() (Entry, error) { case tMName: p.offsets = append(p.offsets, p.l.start, p.l.i) default: - return EntryInvalid, parseError("expected metric name after "+t.String(), t2) + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) } switch t2 := p.nextToken(); t2 { case tText: @@ -308,11 +312,11 @@ func (p *PromParser) Next() (Entry, error) { } case tHelp: if !utf8.Valid(p.text) { - return EntryInvalid, fmt.Errorf("help text is not a valid utf8 string") + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) } } if t := p.nextToken(); t != tLinebreak { - return EntryInvalid, parseError("linebreak expected after metadata", t) + return EntryInvalid, p.parseError("linebreak expected after metadata", t) } switch t { case tHelp: @@ -323,7 +327,7 @@ func (p *PromParser) Next() (Entry, error) { case tComment: p.text = p.l.buf() if t := p.nextToken(); t != tLinebreak { - return EntryInvalid, parseError("linebreak expected after comment", t) + return EntryInvalid, p.parseError("linebreak expected after comment", t) } return EntryComment, nil @@ -340,10 +344,10 @@ func (p *PromParser) Next() (Entry, error) { t2 = p.nextToken() } if t2 != tValue { - return EntryInvalid, parseError("expected value after metric", t2) + return EntryInvalid, p.parseError("expected value after metric", t2) } if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } // Ensure canonical NaN value. 
if math.IsNaN(p.val) { @@ -356,18 +360,18 @@ func (p *PromParser) Next() (Entry, error) { case tTimestamp: p.hasTS = true if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { - return EntryInvalid, err + return EntryInvalid, fmt.Errorf("%v while parsing: %q", err, p.l.b[p.start:p.l.i]) } if t2 := p.nextToken(); t2 != tLinebreak { - return EntryInvalid, parseError("expected next entry after timestamp", t2) + return EntryInvalid, p.parseError("expected next entry after timestamp", t2) } default: - return EntryInvalid, parseError("expected timestamp or new record", t) + return EntryInvalid, p.parseError("expected timestamp or new record", t) } return EntrySeries, nil default: - err = fmt.Errorf("%q is not a valid start token", t) + err = p.parseError("expected a valid start token", t) } return EntryInvalid, err } @@ -380,18 +384,18 @@ func (p *PromParser) parseLVals() error { return nil case tLName: default: - return parseError("expected label name", t) + return p.parseError("expected label name", t) } p.offsets = append(p.offsets, p.l.start, p.l.i) if t := p.nextToken(); t != tEqual { - return parseError("expected equal", t) + return p.parseError("expected equal", t) } if t := p.nextToken(); t != tLValue { - return parseError("expected label value", t) + return p.parseError("expected label value", t) } if !utf8.Valid(p.l.buf()) { - return fmt.Errorf("invalid UTF-8 label value") + return fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) } // The promlexer ensures the value string is quoted. Strip first diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go index 1be98a2f772..3e4bc7df8fa 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -10,6 +10,7 @@ import ( math "math" math_bits "math/bits" + _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" types "github.com/gogo/protobuf/types" ) @@ -281,12 +282,12 @@ func (m *Quantile) GetValue() float64 { } type Summary struct { - SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` - SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Summary) Reset() { *m = Summary{} } @@ -336,7 +337,7 @@ func (m *Summary) GetSampleSum() float64 { return 0 } -func (m *Summary) GetQuantile() []*Quantile { +func (m *Summary) GetQuantile() []Quantile { if m != nil { return m.Quantile } @@ -395,7 +396,7 @@ type Histogram struct { SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` SampleSum float64 
`protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket,omitempty"` + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. @@ -406,14 +407,14 @@ type Histogram struct { ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"` // Negative buckets for the native histogram. - NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span,omitempty"` + NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` // Positive buckets for the native histogram. - PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span,omitempty"` + PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. 
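The struct diffs above are the Go side of marking the repeated message fields in metrics.proto (shown later in this patch) with [(gogoproto.nullable) = false]: fields such as Summary.Quantile and Histogram.Bucket turn from pointer slices into value slices, so a decoded message keeps its elements inline in one backing array instead of one heap allocation per element, and callers no longer need nil checks. A minimal sketch of the caller-visible difference, using the generated types above; the setup values are illustrative and an import of "fmt" is assumed:

s := &Summary{
	SampleCount: 2,
	SampleSum:   3.5,
	// Formerly []*Quantile, one allocation per element, any of which
	// could be nil; now []Quantile with elements stored inline.
	Quantile: []Quantile{
		{Quantile: 0.5, Value: 1.0},
		{Quantile: 0.9, Value: 2.5},
	},
}
for i := range s.Quantile {
	q := &s.Quantile[i] // take the element's address to avoid copying it
	fmt.Printf("quantile %.2f = %g\n", q.Quantile, q.Value)
}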
@@ -478,7 +479,7 @@ func (m *Histogram) GetSampleSum() float64 { return 0 } -func (m *Histogram) GetBucket() []*Bucket { +func (m *Histogram) GetBucket() []Bucket { if m != nil { return m.Bucket } @@ -513,7 +514,7 @@ func (m *Histogram) GetZeroCountFloat() float64 { return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { +func (m *Histogram) GetNegativeSpan() []BucketSpan { if m != nil { return m.NegativeSpan } @@ -534,7 +535,7 @@ func (m *Histogram) GetNegativeCount() []float64 { return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { +func (m *Histogram) GetPositiveSpan() []BucketSpan { if m != nil { return m.PositiveSpan } @@ -688,7 +689,7 @@ func (m *BucketSpan) GetLength() uint32 { } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty"` + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -729,7 +730,7 @@ func (m *Exemplar) XXX_DiscardUnknown() { var xxx_messageInfo_Exemplar proto.InternalMessageInfo -func (m *Exemplar) GetLabel() []*LabelPair { +func (m *Exemplar) GetLabel() []LabelPair { if m != nil { return m.Label } @@ -751,16 +752,16 @@ func (m *Exemplar) GetTimestamp() *types.Timestamp { } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"` - TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"` + TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Metric) Reset() { *m = Metric{} } @@ -796,7 +797,7 @@ func (m *Metric) XXX_DiscardUnknown() { var xxx_messageInfo_Metric proto.InternalMessageInfo -func (m *Metric) GetLabel() []*LabelPair { +func (m *Metric) GetLabel() []LabelPair { if m != nil { return m.Label } @@ -849,7 +850,7 @@ type MetricFamily struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" 
json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric,omitempty"` + Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -909,7 +910,7 @@ func (m *MetricFamily) GetType() MetricType { return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { +func (m *MetricFamily) GetMetric() []Metric { if m != nil { return m.Metric } @@ -937,63 +938,65 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 894 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdd, 0x6e, 0xe3, 0x44, - 0x18, 0xc5, 0xcd, 0xaf, 0xbf, 0x34, 0xdd, 0xec, 0x50, 0xad, 0xac, 0x42, 0xdb, 0x60, 0x09, 0xa9, - 0x20, 0xe4, 0x08, 0xe8, 0x0a, 0x84, 0xe0, 0xa2, 0xdd, 0xcd, 0x76, 0x91, 0xc8, 0xee, 0x32, 0x49, - 0x2e, 0x16, 0x2e, 0xac, 0x49, 0x3a, 0x4d, 0x2c, 0x3c, 0x1e, 0x63, 0x8f, 0x57, 0x94, 0x17, 0xe0, - 0x9a, 0x57, 0xe0, 0x61, 0x10, 0x97, 0x3c, 0x02, 0x2a, 0x0f, 0x02, 0x9a, 0x3f, 0xbb, 0x59, 0x39, - 0xcb, 0xb2, 0x77, 0x99, 0xe3, 0x73, 0xbe, 0x39, 0x67, 0x3c, 0x39, 0x06, 0x3f, 0xe2, 0xa3, 0x34, - 0xe3, 0x8c, 0x8a, 0x35, 0x2d, 0xf2, 0xd1, 0x32, 0x8e, 0x68, 0x22, 0x46, 0x8c, 0x8a, 0x2c, 0x5a, - 0xe6, 0x41, 0x9a, 0x71, 0xc1, 0xd1, 0x7e, 0xc4, 0x83, 0x8a, 0x13, 0x68, 0xce, 0xc1, 0xf1, 0x8a, - 0xf3, 0x55, 0x4c, 0x47, 0x8a, 0xb3, 0x28, 0xae, 0x46, 0x22, 0x62, 0x34, 0x17, 0x84, 0xa5, 0x5a, - 0xe6, 0xdf, 0x07, 0xf7, 0x1b, 0xb2, 0xa0, 0xf1, 0x33, 0x12, 0x65, 0x08, 0x41, 0x33, 0x21, 0x8c, - 0x7a, 0xce, 0xd0, 0x39, 0x71, 0xb1, 0xfa, 0x8d, 0xf6, 0xa1, 0xf5, 0x82, 0xc4, 0x05, 0xf5, 0x76, - 0x14, 0xa8, 0x17, 0xfe, 0x21, 0xb4, 0x2e, 0x48, 0xb1, 0xba, 0xf5, 0x58, 0x6a, 0x1c, 0xfb, 0xf8, - 0x7b, 0xe8, 0x3c, 0xe0, 0x45, 0x22, 0x68, 0x56, 0x4f, 0x40, 0x5f, 0x40, 0x97, 0xfe, 0x44, 0x59, - 0x1a, 0x93, 0x4c, 0x0d, 0xee, 0x7d, 0x72, 0x14, 0xd4, 0x05, 0x08, 0xc6, 0x86, 0x85, 0x4b, 0xbe, - 0xff, 0x25, 0x74, 0xbf, 0x2d, 0x48, 0x22, 0xa2, 0x98, 0xa2, 0x03, 0xe8, 0xfe, 0x68, 0x7e, 0x9b, - 0x0d, 0xca, 0xf5, 0xa6, 0xf3, 0xd2, 0xda, 0x2f, 0x0e, 0x74, 0xa6, 0x05, 0x63, 0x24, 0xbb, 0x46, - 0xef, 0xc1, 0x6e, 0x4e, 0x58, 0x1a, 0xd3, 0x70, 0x29, 0xdd, 0xaa, 0x09, 0x4d, 0xdc, 0xd3, 0x98, - 0x0a, 0x80, 0x0e, 0x01, 0x0c, 0x25, 0x2f, 0x98, 0x99, 0xe4, 0x6a, 0x64, 0x5a, 0x30, 0x99, 0xa3, - 0xdc, 0xbf, 0x31, 0x6c, 0x6c, 0xcf, 0x61, 0x1d, 0x57, 0xfe, 0xfc, 0x63, 0xe8, 0xcc, 0x13, 0x71, - 0x9d, 0xd2, 0xcb, 0x2d, 0xa7, 0xf8, 0x77, 0x13, 0xdc, 0xc7, 0x51, 0x2e, 0xf8, 0x2a, 0x23, 0xec, - 0x75, 0xcc, 0x7e, 0x04, 0xe8, 0x36, 0x25, 0xbc, 0x8a, 0x39, 0x11, 0x5e, 0x53, 0xcd, 0x1c, 0xdc, - 0x22, 0x3e, 0x92, 0xf8, 0x7f, 0x45, 0x3b, 0x85, 0xf6, 0xa2, 0x58, 0xfe, 0x40, 0x85, 0x09, 0xf6, - 0x6e, 0x7d, 0xb0, 0x73, 0xc5, 0xc1, 0x86, 0x8b, 0xee, 0x41, 0x3b, 0x5f, 0xae, 0x29, 0x23, 0x5e, - 0x6b, 0xe8, 0x9c, 0xdc, 0xc5, 0x66, 0x85, 0xde, 0x87, 0xbd, 0x9f, 0x69, 0xc6, 0x43, 0xb1, 0xce, - 0x68, 0xbe, 0xe6, 0xf1, 0xa5, 0xd7, 0x56, 0x1b, 0xf6, 0x25, 0x3a, 0xb3, 0xa0, 0xf4, 0xa4, 0x68, - 0x3a, 0x62, 0x47, 0x45, 0x74, 0x25, 0xa2, 0x03, 0x9e, 0xc0, 0xa0, 0x7a, 0x6c, 0xe2, 0x75, 0xd5, - 0x9c, 0xbd, 0x92, 0xa4, 0xc3, 0x8d, 0xa1, 0x9f, 0xd0, 0x15, 0x11, 0xd1, 0x0b, 0x1a, 0xe6, 0x29, - 0x49, 0x3c, 0x57, 0x85, 0x18, 0xbe, 0x2a, 0xc4, 0x34, 0x25, 0x09, 0xde, 0xb5, 0x32, 0xb9, 0x92, - 0xb6, 0xcb, 0x31, 0x97, 0x34, 0x16, 0xc4, 0x83, 0x61, 0xe3, 0x04, 0xe1, 0x72, 0xf8, 0x43, 0x09, - 0x6e, 0xd0, 0xb4, 0xf5, 0xde, 0xb0, 0x21, 0xd3, 0x59, 0x54, 
0xdb, 0x1f, 0x43, 0x3f, 0xe5, 0x79, - 0x54, 0x99, 0xda, 0x7d, 0x5d, 0x53, 0x56, 0x66, 0x4d, 0x95, 0x63, 0xb4, 0xa9, 0xbe, 0x36, 0x65, - 0xd1, 0xd2, 0x54, 0x49, 0xd3, 0xa6, 0xf6, 0xb4, 0x29, 0x8b, 0x2a, 0x53, 0xfe, 0xef, 0x0e, 0xb4, - 0xf5, 0x56, 0xe8, 0x03, 0x18, 0x2c, 0x0b, 0x56, 0xc4, 0xb7, 0x83, 0xe8, 0x6b, 0x76, 0xa7, 0xc2, - 0x75, 0x94, 0x53, 0xb8, 0xf7, 0x32, 0x75, 0xe3, 0xba, 0xed, 0xbf, 0x24, 0xd0, 0x6f, 0xe5, 0x18, - 0x7a, 0x45, 0x9a, 0xd2, 0x2c, 0x5c, 0xf0, 0x22, 0xb9, 0x34, 0x77, 0x0e, 0x14, 0x74, 0x2e, 0x91, - 0x8d, 0x5e, 0x68, 0xfc, 0xef, 0x5e, 0x80, 0xea, 0xc8, 0xe4, 0x45, 0xe4, 0x57, 0x57, 0x39, 0xd5, - 0x09, 0xee, 0x62, 0xb3, 0x92, 0x78, 0x4c, 0x93, 0x95, 0x58, 0xab, 0xdd, 0xfb, 0xd8, 0xac, 0xfc, - 0x5f, 0x1d, 0xe8, 0xda, 0xa1, 0xe8, 0x3e, 0xb4, 0x62, 0xd9, 0x8a, 0x9e, 0xa3, 0x5e, 0xd0, 0x71, - 0xbd, 0x87, 0xb2, 0x38, 0xb1, 0x66, 0xd7, 0x37, 0x0e, 0xfa, 0x1c, 0xdc, 0xb2, 0x75, 0x4d, 0xa8, - 0x83, 0x40, 0xf7, 0x72, 0x60, 0x7b, 0x39, 0x98, 0x59, 0x06, 0xae, 0xc8, 0xfe, 0x3f, 0x3b, 0xd0, - 0x9e, 0xa8, 0x96, 0x7f, 0x53, 0x47, 0x1f, 0x43, 0x6b, 0x25, 0x7b, 0xda, 0x94, 0xec, 0x3b, 0xf5, - 0x32, 0x55, 0xe5, 0x58, 0x33, 0xd1, 0x67, 0xd0, 0x59, 0xea, 0xee, 0x36, 0x66, 0x0f, 0xeb, 0x45, - 0xa6, 0xe0, 0xb1, 0x65, 0x4b, 0x61, 0xae, 0x8b, 0x55, 0xdd, 0x81, 0xad, 0x42, 0xd3, 0xbe, 0xd8, - 0xb2, 0xa5, 0xb0, 0xd0, 0x45, 0xa8, 0x4a, 0x63, 0xab, 0xd0, 0xb4, 0x25, 0xb6, 0x6c, 0xf4, 0x15, - 0xb8, 0x6b, 0xdb, 0x8f, 0xaa, 0x2c, 0xb6, 0x1e, 0x4c, 0x59, 0xa3, 0xb8, 0x52, 0xc8, 0x46, 0x2d, - 0xcf, 0x3a, 0x64, 0xb9, 0x6a, 0xa4, 0x06, 0xee, 0x95, 0xd8, 0x24, 0xf7, 0x7f, 0x73, 0x60, 0x57, - 0xbf, 0x81, 0x47, 0x84, 0x45, 0xf1, 0x75, 0xed, 0x27, 0x12, 0x41, 0x73, 0x4d, 0xe3, 0xd4, 0x7c, - 0x21, 0xd5, 0x6f, 0x74, 0x0a, 0x4d, 0xe9, 0x51, 0x1d, 0xe1, 0xde, 0xb6, 0x7f, 0xb8, 0x9e, 0x3c, - 0xbb, 0x4e, 0x29, 0x56, 0x6c, 0xd9, 0xb9, 0xfa, 0xab, 0xee, 0x35, 0x5f, 0xd5, 0xb9, 0x5a, 0x87, - 0x0d, 0xf7, 0xc3, 0x05, 0x40, 0x35, 0x09, 0xf5, 0xa0, 0xf3, 0xe0, 0xe9, 0xfc, 0xc9, 0x6c, 0x8c, - 0x07, 0x6f, 0x21, 0x17, 0x5a, 0x17, 0x67, 0xf3, 0x8b, 0xf1, 0xc0, 0x91, 0xf8, 0x74, 0x3e, 0x99, - 0x9c, 0xe1, 0xe7, 0x83, 0x1d, 0xb9, 0x98, 0x3f, 0x99, 0x3d, 0x7f, 0x36, 0x7e, 0x38, 0x68, 0xa0, - 0x3e, 0xb8, 0x8f, 0xbf, 0x9e, 0xce, 0x9e, 0x5e, 0xe0, 0xb3, 0xc9, 0xa0, 0x89, 0xde, 0x86, 0x3b, - 0x4a, 0x13, 0x56, 0x60, 0xeb, 0xdc, 0xff, 0xe3, 0xe6, 0xc8, 0xf9, 0xf3, 0xe6, 0xc8, 0xf9, 0xeb, - 0xe6, 0xc8, 0xf9, 0x6e, 0x3f, 0xe2, 0x61, 0x65, 0x2b, 0xd4, 0xb6, 0x16, 0x6d, 0x75, 0x9b, 0x3f, - 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xbc, 0x25, 0x8b, 0xaf, 0x08, 0x00, 0x00, + // 923 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x18, 0xad, 0x1b, 0xe7, 0xc7, 0x5f, 0x36, 0xdb, 0x74, 0x88, 0x2a, 0x6b, 0x61, 0x37, 0xc1, 0x12, + 0xd2, 0x82, 0x50, 0x22, 0xa0, 0x08, 0x54, 0x40, 0x62, 0xb7, 0xdd, 0x6e, 0x51, 0x49, 0x5b, 0x26, + 0xc9, 0x45, 0xe1, 0xc2, 0x9a, 0x64, 0x67, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, + 0x25, 0xd7, 0xbc, 0x02, 0x4f, 0x82, 0x7a, 0xc9, 0x13, 0x20, 0xb4, 0xef, 0xc0, 0x3d, 0x9a, 0x3f, + 0x3b, 0x5b, 0x39, 0x85, 0x15, 0x77, 0x33, 0xc7, 0xe7, 0xfb, 0xe6, 0x9c, 0x99, 0xc9, 0x99, 0x80, + 0x17, 0xb2, 0x49, 0x9a, 0xb1, 0x98, 0xf2, 0x35, 0x2d, 0xf2, 0xc9, 0x2a, 0x0a, 0x69, 0xc2, 0x27, + 0x31, 0xe5, 0x59, 0xb8, 0xca, 0xc7, 0x69, 0xc6, 0x38, 0x43, 0x83, 0x90, 0x8d, 0x2b, 0xce, 0x58, + 0x71, 0xf6, 0x06, 0x01, 0x0b, 0x98, 0x24, 0x4c, 0xc4, 0x48, 0x71, 0xf7, 0x86, 0x01, 0x63, 0x41, + 0x44, 0x27, 0x72, 0xb6, 0x2c, 0xce, 0x27, 0x3c, 
0x8c, 0x69, 0xce, 0x49, 0x9c, 0x2a, 0x82, 0xf7, + 0x31, 0x38, 0x5f, 0x93, 0x25, 0x8d, 0x9e, 0x91, 0x30, 0x43, 0x08, 0xec, 0x84, 0xc4, 0xd4, 0xb5, + 0x46, 0xd6, 0xa1, 0x83, 0xe5, 0x18, 0x0d, 0xa0, 0xf9, 0x82, 0x44, 0x05, 0x75, 0x6f, 0x4a, 0x50, + 0x4d, 0xbc, 0x7d, 0x68, 0x9e, 0x92, 0x22, 0xd8, 0xf8, 0x2c, 0x6a, 0x2c, 0xf3, 0xf9, 0x3b, 0x68, + 0xdf, 0x67, 0x45, 0xc2, 0x69, 0x56, 0x4f, 0x40, 0xf7, 0xa0, 0x43, 0x7f, 0xa4, 0x71, 0x1a, 0x91, + 0x4c, 0x36, 0xee, 0x7e, 0x78, 0x30, 0xae, 0xb3, 0x35, 0x3e, 0xd1, 0x2c, 0x5c, 0xf2, 0xbd, 0xcf, + 0xa1, 0xf3, 0x4d, 0x41, 0x12, 0x1e, 0x46, 0x14, 0xed, 0x41, 0xe7, 0x07, 0x3d, 0xd6, 0x0b, 0x94, + 0xf3, 0xab, 0xca, 0x4b, 0x69, 0xbf, 0x58, 0xd0, 0x9e, 0x15, 0x71, 0x4c, 0xb2, 0x0b, 0xf4, 0x36, + 0xec, 0xe4, 0x24, 0x4e, 0x23, 0xea, 0xaf, 0x84, 0x5a, 0xd9, 0xc1, 0xc6, 0x5d, 0x85, 0x49, 0x03, + 0x68, 0x1f, 0x40, 0x53, 0xf2, 0x22, 0xd6, 0x9d, 0x1c, 0x85, 0xcc, 0x8a, 0x18, 0x7d, 0xb9, 0xb1, + 0x7e, 0x63, 0xd4, 0xd8, 0xee, 0xc3, 0x28, 0x3e, 0xb6, 0x5f, 0xfe, 0x39, 0xbc, 0x51, 0xa9, 0xf4, + 0x86, 0xd0, 0x5e, 0x24, 0xfc, 0x22, 0xa5, 0x67, 0x5b, 0xf6, 0xf2, 0x6f, 0x1b, 0x9c, 0x47, 0x61, + 0xce, 0x59, 0x90, 0x91, 0xf8, 0xbf, 0x48, 0x7e, 0x1f, 0xd0, 0x26, 0xc5, 0x3f, 0x8f, 0x18, 0xe1, + 0xae, 0x2d, 0x7b, 0xf6, 0x37, 0x88, 0x0f, 0x05, 0xfe, 0x6f, 0x06, 0xef, 0x41, 0x6b, 0x59, 0xac, + 0xbe, 0xa7, 0x5c, 0xdb, 0x7b, 0xab, 0xde, 0xde, 0xb1, 0xe4, 0x68, 0x73, 0xba, 0x02, 0xdd, 0x81, + 0x56, 0xbe, 0x5a, 0xd3, 0x98, 0xb8, 0xcd, 0x91, 0x75, 0x78, 0x1b, 0xeb, 0x19, 0x7a, 0x07, 0x76, + 0x7f, 0xa2, 0x19, 0xf3, 0xf9, 0x3a, 0xa3, 0xf9, 0x9a, 0x45, 0x67, 0x6e, 0x4b, 0x2e, 0xdb, 0x13, + 0xe8, 0xdc, 0x80, 0x42, 0x99, 0xa4, 0x29, 0xa3, 0x6d, 0x69, 0xd4, 0x11, 0x88, 0xb2, 0x79, 0x08, + 0xfd, 0xea, 0xb3, 0x36, 0xd9, 0x91, 0x7d, 0x76, 0x4b, 0x92, 0xb2, 0xf8, 0x18, 0x7a, 0x09, 0x0d, + 0x08, 0x0f, 0x5f, 0x50, 0x3f, 0x4f, 0x49, 0xe2, 0x3a, 0xd2, 0xca, 0xe8, 0x75, 0x56, 0x66, 0x29, + 0x49, 0xb4, 0x9d, 0x1d, 0x53, 0x2c, 0x30, 0x21, 0xbe, 0x6c, 0x76, 0x46, 0x23, 0x4e, 0x5c, 0x18, + 0x35, 0x0e, 0x11, 0x2e, 0x97, 0x78, 0x20, 0xc0, 0x2b, 0x34, 0x65, 0xa0, 0x3b, 0x6a, 0x08, 0x8f, + 0x06, 0x55, 0x26, 0x1e, 0x43, 0x2f, 0x65, 0x79, 0x58, 0x49, 0xdb, 0xb9, 0x9e, 0x34, 0x53, 0x6c, + 0xa4, 0x95, 0xcd, 0x94, 0xb4, 0x9e, 0x92, 0x66, 0xd0, 0x52, 0x5a, 0x49, 0x53, 0xd2, 0x76, 0x95, + 0x34, 0x83, 0x4a, 0x69, 0xde, 0xef, 0x16, 0xb4, 0xd4, 0x82, 0xe8, 0x5d, 0xe8, 0xaf, 0x8a, 0xb8, + 0x88, 0x36, 0xed, 0xa8, 0x8b, 0x77, 0xab, 0xc2, 0x95, 0xa1, 0xbb, 0x70, 0xe7, 0x55, 0xea, 0x95, + 0x0b, 0x38, 0x78, 0xa5, 0x40, 0x9d, 0xd0, 0x10, 0xba, 0x45, 0x9a, 0xd2, 0xcc, 0x5f, 0xb2, 0x22, + 0x39, 0xd3, 0xb7, 0x10, 0x24, 0x74, 0x2c, 0x90, 0x2b, 0x79, 0xd1, 0xb8, 0x76, 0x5e, 0x40, 0xb5, + 0x71, 0xe2, 0x52, 0xb2, 0xf3, 0xf3, 0x9c, 0x2a, 0x07, 0xb7, 0xb1, 0x9e, 0x09, 0x3c, 0xa2, 0x49, + 0xc0, 0xd7, 0x72, 0xf5, 0x1e, 0xd6, 0x33, 0xef, 0x57, 0x0b, 0x3a, 0xa6, 0x29, 0xfa, 0x0c, 0x9a, + 0x91, 0x48, 0x4b, 0xd7, 0x92, 0xc7, 0x34, 0xac, 0xd7, 0x50, 0x06, 0xaa, 0x3e, 0x25, 0x55, 0x53, + 0x9f, 0x47, 0xe8, 0x53, 0x70, 0xca, 0x4c, 0xd6, 0xd6, 0xf6, 0xc6, 0x2a, 0xb5, 0xc7, 0x26, 0xb5, + 0xc7, 0x73, 0xc3, 0xc0, 0x15, 0xd9, 0xfb, 0xb9, 0x01, 0xad, 0xa9, 0x7c, 0x19, 0xfe, 0x9f, 0xae, + 0x0f, 0xa0, 0x19, 0x88, 0x2c, 0xd7, 0x41, 0xfc, 0x66, 0x7d, 0xb1, 0x8c, 0x7b, 0xac, 0x98, 0xe8, + 0x13, 0x68, 0xaf, 0x54, 0xbe, 0x6b, 0xc9, 0xfb, 0xf5, 0x45, 0xfa, 0x11, 0xc0, 0x86, 0x2d, 0x0a, + 0x73, 0x15, 0xbe, 0xf2, 0x3e, 0x6c, 0x2d, 0xd4, 0x09, 0x8d, 0x0d, 0x5b, 0x14, 0x16, 0x2a, 0x26, + 0x65, 0x98, 0x6c, 0x2d, 0xd4, 0x59, 0x8a, 0x0d, 0x1b, 0x7d, 0x01, 0xce, 
0xda, 0xa4, 0xa7, 0x0c, + 0x91, 0xad, 0xdb, 0x53, 0x86, 0x2c, 0xae, 0x2a, 0x44, 0xde, 0x96, 0x3b, 0xee, 0xc7, 0xb9, 0x4c, + 0xaa, 0x06, 0xee, 0x96, 0xd8, 0x34, 0xf7, 0x7e, 0xb3, 0x60, 0x47, 0x9d, 0xc3, 0x43, 0x12, 0x87, + 0xd1, 0x45, 0xed, 0x33, 0x8a, 0xc0, 0x5e, 0xd3, 0x28, 0xd5, 0xaf, 0xa8, 0x1c, 0xa3, 0xbb, 0x60, + 0x0b, 0x8d, 0x72, 0x0b, 0x77, 0xb7, 0xfd, 0xe6, 0x55, 0xe7, 0xf9, 0x45, 0x4a, 0xb1, 0x64, 0x8b, + 0x44, 0x56, 0xff, 0x07, 0x5c, 0xfb, 0x75, 0x89, 0xac, 0xea, 0x4c, 0x22, 0xab, 0x8a, 0xf7, 0x96, + 0x00, 0x55, 0x3f, 0xd4, 0x85, 0xf6, 0xfd, 0xa7, 0x8b, 0x27, 0xf3, 0x13, 0xdc, 0xbf, 0x81, 0x1c, + 0x68, 0x9e, 0x1e, 0x2d, 0x4e, 0x4f, 0xfa, 0x96, 0xc0, 0x67, 0x8b, 0xe9, 0xf4, 0x08, 0x3f, 0xef, + 0xdf, 0x14, 0x93, 0xc5, 0x93, 0xf9, 0xf3, 0x67, 0x27, 0x0f, 0xfa, 0x0d, 0xd4, 0x03, 0xe7, 0xd1, + 0x57, 0xb3, 0xf9, 0xd3, 0x53, 0x7c, 0x34, 0xed, 0xdb, 0xe8, 0x0d, 0xb8, 0x25, 0x6b, 0xfc, 0x0a, + 0x6c, 0x1e, 0x7b, 0x2f, 0x2f, 0x0f, 0xac, 0x3f, 0x2e, 0x0f, 0xac, 0xbf, 0x2e, 0x0f, 0xac, 0x6f, + 0x07, 0x21, 0xf3, 0x2b, 0x71, 0xbe, 0x12, 0xb7, 0x6c, 0xc9, 0x9b, 0xfd, 0xd1, 0x3f, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x52, 0x2d, 0xb5, 0x31, 0xef, 0x08, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -2496,7 +2499,7 @@ func (m *Summary) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Quantile = append(m.Quantile, &Quantile{}) + m.Quantile = append(m.Quantile, Quantile{}) if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2673,7 +2676,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Bucket = append(m.Bucket, &Bucket{}) + m.Bucket = append(m.Bucket, Bucket{}) if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2780,7 +2783,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NegativeSpan = append(m.NegativeSpan, &BucketSpan{}) + m.NegativeSpan = append(m.NegativeSpan, BucketSpan{}) if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2946,7 +2949,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PositiveSpan = append(m.PositiveSpan, &BucketSpan{}) + m.PositiveSpan = append(m.PositiveSpan, BucketSpan{}) if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -3382,7 +3385,7 @@ func (m *Exemplar) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Label = append(m.Label, &LabelPair{}) + m.Label = append(m.Label, LabelPair{}) if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -3514,7 +3517,7 @@ func (m *Metric) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Label = append(m.Label, &LabelPair{}) + m.Label = append(m.Label, LabelPair{}) if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -3881,7 +3884,7 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Metric = append(m.Metric, &Metric{}) + m.Metric = append(m.Metric, Metric{}) if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto 
b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto index 20858f33db1..6bbea622f24 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -21,6 +21,7 @@ syntax = "proto3"; package io.prometheus.client; option go_package = "io_prometheus_client"; +import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; message LabelPair { @@ -58,9 +59,9 @@ message Quantile { } message Summary { - uint64 sample_count = 1; - double sample_sum = 2; - repeated Quantile quantile = 3; + uint64 sample_count = 1; + double sample_sum = 2; + repeated Quantile quantile = 3 [(gogoproto.nullable) = false]; } message Untyped { @@ -72,7 +73,7 @@ message Histogram { double sample_count_float = 4; // Overrides sample_count if > 0. double sample_sum = 2; // Buckets for the conventional histogram. - repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional. + repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. // Everything below here is for native histograms (also known as sparse histograms). // Native histograms are an experimental feature without stability guarantees. @@ -88,20 +89,20 @@ message Histogram { double zero_count_float = 8; // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. - repeated BucketSpan negative_span = 9; + repeated BucketSpan negative_span = 9 [(gogoproto.nullable) = false]; // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. - repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - repeated double negative_count = 11; // Absolute count of each bucket. + repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_count = 11; // Absolute count of each bucket. // Positive buckets for the native histogram. - repeated BucketSpan positive_span = 12; + repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false]; // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - repeated double positive_count = 14; // Absolute count of each bucket. + repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_count = 14; // Absolute count of each bucket. } message Bucket { @@ -123,24 +124,24 @@ message BucketSpan { } message Exemplar { - repeated LabelPair label = 1; + repeated LabelPair label = 1 [(gogoproto.nullable) = false]; double value = 2; google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style. 
} message Metric { - repeated LabelPair label = 1; - Gauge gauge = 2; - Counter counter = 3; - Summary summary = 4; - Untyped untyped = 5; - Histogram histogram = 7; - int64 timestamp_ms = 6; + repeated LabelPair label = 1 [(gogoproto.nullable) = false]; + Gauge gauge = 2; + Counter counter = 3; + Summary summary = 4; + Untyped untyped = 5; + Histogram histogram = 7; + int64 timestamp_ms = 6; } message MetricFamily { string name = 1; string help = 2; MetricType type = 3; - repeated Metric metric = 4; + repeated Metric metric = 4 [(gogoproto.nullable) = false]; } diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index a4c6b332bba..e78e48809af 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -383,14 +383,14 @@ type Histogram struct { // *Histogram_ZeroCountFloat ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` // Negative Buckets. - NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` // Use either "negative_deltas" or "negative_counts", the former for // regular histograms with integer counts, the latter for float // histograms. NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` // Positive Buckets. - PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` // Use either "positive_deltas" or "positive_counts", the former for // regular histograms with integer counts, the latter for float // histograms. 
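The comments carried along in these hunks describe the delta encoding of native histogram bucket counts: each negative_delta or positive_delta entry is the count difference from the previous bucket, with the first bucket counted from zero. A short, self-contained sketch of the decoding step (the function name is mine, not part of either package):

// decodeDeltas converts delta-encoded integer bucket counts into
// absolute counts; e.g. deltas [2, 1, -1, 0] yield counts [2, 3, 2, 2].
func decodeDeltas(deltas []int64) []uint64 {
	counts := make([]uint64, len(deltas))
	var cur int64
	for i, d := range deltas {
		cur += d // cur is now the absolute count of bucket i
		counts[i] = uint64(cur)
	}
	return counts
}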
@@ -529,7 +529,7 @@ func (m *Histogram) GetZeroCountFloat() float64 { return 0 } -func (m *Histogram) GetNegativeSpans() []*BucketSpan { +func (m *Histogram) GetNegativeSpans() []BucketSpan { if m != nil { return m.NegativeSpans } @@ -550,7 +550,7 @@ func (m *Histogram) GetNegativeCounts() []float64 { return nil } -func (m *Histogram) GetPositiveSpans() []*BucketSpan { +func (m *Histogram) GetPositiveSpans() []BucketSpan { if m != nil { return m.PositiveSpans } @@ -1143,75 +1143,75 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 1075 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x14, 0x5e, 0xdb, 0x89, 0x13, 0x9f, 0xfc, 0xd4, 0x3b, 0xda, 0x16, 0x53, 0xd1, 0x6d, 0xb0, 0x54, - 0x88, 0x10, 0xca, 0xaa, 0x85, 0x0b, 0x2a, 0x0a, 0xd2, 0x6e, 0xc9, 0xfe, 0x88, 0x26, 0x51, 0x27, - 0x59, 0x41, 0xb9, 0x89, 0x66, 0x93, 0xd9, 0xc4, 0xaa, 0xff, 0xf0, 0x4c, 0xaa, 0x0d, 0xef, 0xc1, - 0x1d, 0x2f, 0xc1, 0x3d, 0x12, 0xb7, 0xbd, 0xe4, 0x09, 0x10, 0xda, 0x2b, 0x1e, 0x03, 0xcd, 0xb1, - 0x1d, 0x3b, 0xdd, 0x82, 0x54, 0xee, 0xe6, 0x7c, 0xe7, 0x3b, 0x33, 0x9f, 0xe7, 0xfc, 0x8c, 0xa1, - 0x21, 0xd7, 0x31, 0x17, 0xbd, 0x38, 0x89, 0x64, 0x44, 0x20, 0x4e, 0xa2, 0x80, 0xcb, 0x25, 0x5f, - 0x89, 0xbb, 0x7b, 0x8b, 0x68, 0x11, 0x21, 0x7c, 0xa0, 0x56, 0x29, 0xc3, 0xfd, 0x45, 0x87, 0xf6, - 0x80, 0xcb, 0xc4, 0x9b, 0x0d, 0xb8, 0x64, 0x73, 0x26, 0x19, 0x79, 0x0c, 0x15, 0xb5, 0x87, 0xa3, - 0x75, 0xb4, 0x6e, 0xfb, 0xd1, 0x83, 0x5e, 0xb1, 0x47, 0x6f, 0x9b, 0x99, 0x99, 0x93, 0x75, 0xcc, - 0x29, 0x86, 0x90, 0x4f, 0x81, 0x04, 0x88, 0x4d, 0x2f, 0x59, 0xe0, 0xf9, 0xeb, 0x69, 0xc8, 0x02, - 0xee, 0xe8, 0x1d, 0xad, 0x6b, 0x51, 0x3b, 0xf5, 0x1c, 0xa3, 0x63, 0xc8, 0x02, 0x4e, 0x08, 0x54, - 0x96, 0xdc, 0x8f, 0x9d, 0x0a, 0xfa, 0x71, 0xad, 0xb0, 0x55, 0xe8, 0x49, 0xa7, 0x9a, 0x62, 0x6a, - 0xed, 0xae, 0x01, 0x8a, 0x93, 0x48, 0x03, 0x6a, 0xe7, 0xc3, 0x6f, 0x87, 0xa3, 0xef, 0x86, 0xf6, - 0x8e, 0x32, 0x9e, 0x8e, 0xce, 0x87, 0x93, 0x3e, 0xb5, 0x35, 0x62, 0x41, 0xf5, 0xe4, 0xf0, 0xfc, - 0xa4, 0x6f, 0xeb, 0xa4, 0x05, 0xd6, 0xe9, 0xd9, 0x78, 0x32, 0x3a, 0xa1, 0x87, 0x03, 0xdb, 0x20, - 0x04, 0xda, 0xe8, 0x29, 0xb0, 0x8a, 0x0a, 0x1d, 0x9f, 0x0f, 0x06, 0x87, 0xf4, 0x85, 0x5d, 0x25, - 0x75, 0xa8, 0x9c, 0x0d, 0x8f, 0x47, 0xb6, 0x49, 0x9a, 0x50, 0x1f, 0x4f, 0x0e, 0x27, 0xfd, 0x71, - 0x7f, 0x62, 0xd7, 0xdc, 0x27, 0x60, 0x8e, 0x59, 0x10, 0xfb, 0x9c, 0xec, 0x41, 0xf5, 0x15, 0xf3, - 0x57, 0xe9, 0xb5, 0x68, 0x34, 0x35, 0xc8, 0x07, 0x60, 0x49, 0x2f, 0xe0, 0x42, 0xb2, 0x20, 0xc6, - 0xef, 0x34, 0x68, 0x01, 0xb8, 0x11, 0xd4, 0xfb, 0x57, 0x3c, 0x88, 0x7d, 0x96, 0x90, 0x03, 0x30, - 0x7d, 0x76, 0xc1, 0x7d, 0xe1, 0x68, 0x1d, 0xa3, 0xdb, 0x78, 0xb4, 0x5b, 0xbe, 0xd7, 0x67, 0xca, - 0x73, 0x54, 0x79, 0xfd, 0xe7, 0xfd, 0x1d, 0x9a, 0xd1, 0x8a, 0x03, 0xf5, 0x7f, 0x3d, 0xd0, 0x78, - 0xf3, 0xc0, 0xdf, 0xab, 0x60, 0x9d, 0x7a, 0x42, 0x46, 0x8b, 0x84, 0x05, 0xe4, 0x1e, 0x58, 0xb3, - 0x68, 0x15, 0xca, 0xa9, 0x17, 0x4a, 0x94, 0x5d, 0x39, 0xdd, 0xa1, 0x75, 0x84, 0xce, 0x42, 0x49, - 0x3e, 0x84, 0x46, 0xea, 0xbe, 0xf4, 0x23, 0x26, 0xd3, 0x63, 0x4e, 0x77, 0x28, 0x20, 0x78, 0xac, - 0x30, 0x62, 0x83, 0x21, 0x56, 0x01, 0x9e, 0xa3, 0x51, 0xb5, 0x24, 0x77, 0xc0, 0x14, 0xb3, 0x25, - 0x0f, 0x18, 0x66, 0x6d, 0x97, 0x66, 0x16, 0x79, 0x00, 0xed, 0x9f, 0x78, 0x12, 0x4d, 0xe5, 0x32, - 0xe1, 0x62, 0x19, 0xf9, 0x73, 0xcc, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20, 0xf9, 0x28, 0xa3, - 0x15, 0xba, 0x4c, 0xd4, 
0xa5, 0xd1, 0xa6, 0xc2, 0x9f, 0xe6, 0xda, 0x3e, 0x01, 0xbb, 0xc4, 0x4b, - 0x05, 0xd6, 0x50, 0xa0, 0x46, 0xdb, 0x1b, 0x66, 0x2a, 0xf2, 0x2b, 0x68, 0x87, 0x7c, 0xc1, 0xa4, - 0xf7, 0x8a, 0x4f, 0x45, 0xcc, 0x42, 0xe1, 0xd4, 0xf1, 0x86, 0xef, 0x94, 0x6f, 0xf8, 0x68, 0x35, - 0x7b, 0xc9, 0xe5, 0x38, 0x66, 0x21, 0x6d, 0xe5, 0x6c, 0x65, 0x09, 0xf2, 0x31, 0xdc, 0xda, 0x84, - 0xcf, 0xb9, 0x2f, 0x99, 0x70, 0xac, 0x8e, 0xd1, 0x25, 0x74, 0xb3, 0xeb, 0x37, 0x88, 0x6e, 0x11, - 0x51, 0x97, 0x70, 0xa0, 0x63, 0x74, 0xb5, 0x82, 0x88, 0xa2, 0x84, 0x12, 0x14, 0x47, 0xc2, 0x2b, - 0x09, 0x6a, 0xfc, 0xb7, 0xa0, 0x9c, 0xbd, 0x11, 0xb4, 0x09, 0xcf, 0x04, 0x35, 0x53, 0x41, 0x39, - 0x5c, 0x08, 0xda, 0x10, 0x33, 0x41, 0xad, 0x54, 0x50, 0x0e, 0x67, 0x82, 0xbe, 0x06, 0x48, 0xb8, - 0xe0, 0x72, 0xba, 0x54, 0x37, 0xde, 0xc6, 0xbe, 0xbe, 0x5f, 0x16, 0xb3, 0xa9, 0x99, 0x1e, 0x55, - 0xbc, 0x53, 0x2f, 0x94, 0xd4, 0x4a, 0xf2, 0xe5, 0x76, 0xd1, 0xdd, 0x7a, 0xb3, 0xe8, 0x3e, 0x07, - 0x6b, 0x13, 0xb5, 0xdd, 0x9d, 0x35, 0x30, 0x5e, 0xf4, 0xc7, 0xb6, 0x46, 0x4c, 0xd0, 0x87, 0x23, - 0x5b, 0x2f, 0x3a, 0xd4, 0x38, 0xaa, 0x41, 0x15, 0x35, 0x1f, 0x35, 0x01, 0x8a, 0x54, 0xbb, 0x4f, - 0x00, 0x8a, 0x9b, 0x51, 0xd5, 0x16, 0x5d, 0x5e, 0x0a, 0x9e, 0x96, 0xef, 0x2e, 0xcd, 0x2c, 0x85, - 0xfb, 0x3c, 0x5c, 0xc8, 0x25, 0x56, 0x6d, 0x8b, 0x66, 0x96, 0xfb, 0xb7, 0x06, 0x30, 0xf1, 0x02, - 0x3e, 0xe6, 0x89, 0xc7, 0xc5, 0xbb, 0xf7, 0xdc, 0x23, 0xa8, 0x09, 0x6c, 0x77, 0xe1, 0xe8, 0x18, - 0x41, 0xca, 0x11, 0xe9, 0x24, 0xc8, 0x42, 0x72, 0x22, 0xf9, 0x02, 0x2c, 0x9e, 0x35, 0xb9, 0x70, - 0x0c, 0x8c, 0xda, 0x2b, 0x47, 0xe5, 0x13, 0x20, 0x8b, 0x2b, 0xc8, 0xe4, 0x4b, 0x80, 0x65, 0x7e, - 0xf1, 0xc2, 0xa9, 0x60, 0xe8, 0xed, 0xb7, 0xa6, 0x25, 0x8b, 0x2d, 0xd1, 0xdd, 0x87, 0x50, 0xc5, - 0x2f, 0x50, 0x13, 0x13, 0xa7, 0xac, 0x96, 0x4e, 0x4c, 0xb5, 0xde, 0x9e, 0x1d, 0x56, 0x36, 0x3b, - 0xdc, 0xc7, 0x60, 0x3e, 0x4b, 0xbf, 0xf3, 0x5d, 0x2f, 0xc6, 0xfd, 0x59, 0x83, 0x26, 0xe2, 0x03, - 0x26, 0x67, 0x4b, 0x9e, 0x90, 0x87, 0x5b, 0x8f, 0xc4, 0xbd, 0x1b, 0xf1, 0x19, 0xaf, 0x57, 0x7a, - 0x1c, 0x72, 0xa1, 0xfa, 0xdb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8, 0xe0, 0xa8, 0x37, 0x41, 0xef, - 0x3f, 0x4f, 0xeb, 0x68, 0xd8, 0x7f, 0x9e, 0xd6, 0x11, 0x55, 0xe3, 0x5d, 0x01, 0xb4, 0x6f, 0x1b, - 0xee, 0xaf, 0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0x7b, 0x50, 0x13, 0x92, 0xc7, 0xd3, - 0x40, 0xa0, 0x2e, 0x83, 0x9a, 0xca, 0x1c, 0x08, 0x75, 0xf4, 0xe5, 0x2a, 0x9c, 0xe5, 0x47, 0xab, - 0x35, 0x79, 0x1f, 0xea, 0x42, 0xb2, 0x44, 0x2a, 0x76, 0x3a, 0x48, 0x6b, 0x68, 0x0f, 0x04, 0xb9, - 0x0d, 0x26, 0x0f, 0xe7, 0x53, 0x4c, 0x8a, 0x72, 0x54, 0x79, 0x38, 0x1f, 0x08, 0x72, 0x17, 0xea, - 0x8b, 0x24, 0x5a, 0xc5, 0x5e, 0xb8, 0x70, 0xaa, 0x1d, 0xa3, 0x6b, 0xd1, 0x8d, 0x4d, 0xda, 0xa0, - 0x5f, 0xac, 0x71, 0x98, 0xd5, 0xa9, 0x7e, 0xb1, 0x56, 0xbb, 0x27, 0x2c, 0x5c, 0x70, 0xb5, 0x49, - 0x2d, 0xdd, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd3, 0xa0, 0xfa, 0x74, 0xb9, 0x0a, 0x5f, 0x92, 0x7d, - 0x68, 0x04, 0x5e, 0x38, 0x55, 0xad, 0x54, 0x68, 0xb6, 0x02, 0x2f, 0x54, 0x35, 0x3c, 0x10, 0xe8, - 0x67, 0x57, 0x1b, 0x7f, 0xf6, 0xbe, 0x04, 0xec, 0x2a, 0xf3, 0xf7, 0xb2, 0x24, 0x18, 0x98, 0x84, - 0xbb, 0xe5, 0x24, 0xe0, 0x01, 0xbd, 0x7e, 0x38, 0x8b, 0xe6, 0x5e, 0xb8, 0x28, 0x32, 0xa0, 0xde, - 0x6d, 0xfc, 0xaa, 0x26, 0xc5, 0xb5, 0x7b, 0x00, 0xf5, 0x9c, 0x75, 0xa3, 0x79, 0xbf, 0x1f, 0xa9, - 0x67, 0x75, 0xeb, 0x2d, 0xd5, 0xdd, 0x1f, 0xa1, 0x85, 0x9b, 0xf3, 0xf9, 0xff, 0xed, 0xb2, 0x03, - 0x30, 0x67, 0x6a, 0x87, 0xbc, 0xc9, 0x76, 0x6f, 0x08, 0xcf, 0x03, 0x52, 0xda, 0xd1, 0xde, 0xeb, - 0xeb, 0x7d, 0xed, 0x8f, 0xeb, 0x7d, 0xed, 0xaf, 
0xeb, 0x7d, 0xed, 0x07, 0x53, 0xb1, 0xe3, 0x8b, - 0x0b, 0x13, 0xff, 0x60, 0x3e, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0x36, 0xd7, 0x1e, 0xb4, 0xf2, - 0x08, 0x00, 0x00, + // 1081 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0x0b, 0x27, 0x3f, 0xff, 0xa0, 0x71, 0x54, 0x02, + 0x69, 0x85, 0xa2, 0x90, 0x91, 0xb4, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x5d, 0xf9, 0x80, 0x46, 0x12, + 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0x78, 0x2a, 0x77, 0x15, 0x58, 0x7d, + 0x8f, 0xde, 0xf5, 0x25, 0x7a, 0xdf, 0x07, 0x08, 0xd0, 0x9b, 0x3e, 0x41, 0x51, 0xf8, 0xaa, 0x8f, + 0x51, 0xec, 0x90, 0x14, 0xa9, 0x38, 0x05, 0x9a, 0xde, 0xed, 0x7c, 0xf3, 0xcd, 0xec, 0xc7, 0xdd, + 0x99, 0x59, 0x42, 0x43, 0xae, 0x63, 0x2e, 0x7a, 0x71, 0x12, 0xc9, 0x88, 0x40, 0x9c, 0x44, 0x01, + 0x97, 0x4b, 0xbe, 0x12, 0xf7, 0xf7, 0x16, 0xd1, 0x22, 0x42, 0xf8, 0x40, 0xad, 0x52, 0x86, 0xfb, + 0xb3, 0x0e, 0xed, 0x01, 0x97, 0x89, 0x37, 0x1b, 0x70, 0xc9, 0xe6, 0x4c, 0x32, 0xf2, 0x14, 0x2a, + 0x2a, 0x87, 0xa3, 0x75, 0xb4, 0x6e, 0xfb, 0xc9, 0xa3, 0x5e, 0x91, 0xa3, 0xb7, 0xcd, 0xcc, 0xcc, + 0xc9, 0x3a, 0xe6, 0x14, 0x43, 0xc8, 0xa7, 0x40, 0x02, 0xc4, 0xa6, 0x57, 0x2c, 0xf0, 0xfc, 0xf5, + 0x34, 0x64, 0x01, 0x77, 0xf4, 0x8e, 0xd6, 0xb5, 0xa8, 0x9d, 0x7a, 0x4e, 0xd0, 0x31, 0x64, 0x01, + 0x27, 0x04, 0x2a, 0x4b, 0xee, 0xc7, 0x4e, 0x05, 0xfd, 0xb8, 0x56, 0xd8, 0x2a, 0xf4, 0xa4, 0x53, + 0x4d, 0x31, 0xb5, 0x76, 0xd7, 0x00, 0xc5, 0x4e, 0xa4, 0x01, 0xb5, 0x8b, 0xe1, 0x37, 0xc3, 0xd1, + 0xb7, 0x43, 0x7b, 0x47, 0x19, 0xc7, 0xa3, 0x8b, 0xe1, 0xa4, 0x4f, 0x6d, 0x8d, 0x58, 0x50, 0x3d, + 0x3d, 0xbc, 0x38, 0xed, 0xdb, 0x3a, 0x69, 0x81, 0x75, 0x76, 0x3e, 0x9e, 0x8c, 0x4e, 0xe9, 0xe1, + 0xc0, 0x36, 0x08, 0x81, 0x36, 0x7a, 0x0a, 0xac, 0xa2, 0x42, 0xc7, 0x17, 0x83, 0xc1, 0x21, 0x7d, + 0x69, 0x57, 0x49, 0x1d, 0x2a, 0xe7, 0xc3, 0x93, 0x91, 0x6d, 0x92, 0x26, 0xd4, 0xc7, 0x93, 0xc3, + 0x49, 0x7f, 0xdc, 0x9f, 0xd8, 0x35, 0xf7, 0x19, 0x98, 0x63, 0x16, 0xc4, 0x3e, 0x27, 0x7b, 0x50, + 0x7d, 0xcd, 0xfc, 0x55, 0x7a, 0x2c, 0x1a, 0x4d, 0x0d, 0xf2, 0x01, 0x58, 0xd2, 0x0b, 0xb8, 0x90, + 0x2c, 0x88, 0xf1, 0x3b, 0x0d, 0x5a, 0x00, 0x6e, 0x04, 0xf5, 0xfe, 0x35, 0x0f, 0x62, 0x9f, 0x25, + 0xe4, 0x00, 0x4c, 0x9f, 0x5d, 0x72, 0x5f, 0x38, 0x5a, 0xc7, 0xe8, 0x36, 0x9e, 0xec, 0x96, 0xcf, + 0xf5, 0xb9, 0xf2, 0x1c, 0x55, 0xde, 0xfc, 0xf1, 0x70, 0x87, 0x66, 0xb4, 0x62, 0x43, 0xfd, 0x1f, + 0x37, 0x34, 0xde, 0xde, 0xf0, 0xb7, 0x2a, 0x58, 0x67, 0x9e, 0x90, 0xd1, 0x22, 0x61, 0x01, 0x79, + 0x00, 0xd6, 0x2c, 0x5a, 0x85, 0x72, 0xea, 0x85, 0x12, 0x65, 0x57, 0xce, 0x76, 0x68, 0x1d, 0xa1, + 0xf3, 0x50, 0x92, 0x0f, 0xa1, 0x91, 0xba, 0xaf, 0xfc, 0x88, 0xc9, 0x74, 0x9b, 0xb3, 0x1d, 0x0a, + 0x08, 0x9e, 0x28, 0x8c, 0xd8, 0x60, 0x88, 0x55, 0x80, 0xfb, 0x68, 0x54, 0x2d, 0xc9, 0x3d, 0x30, + 0xc5, 0x6c, 0xc9, 0x03, 0x86, 0xb7, 0xb6, 0x4b, 0x33, 0x8b, 0x3c, 0x82, 0xf6, 0x8f, 0x3c, 0x89, + 0xa6, 0x72, 0x99, 0x70, 0xb1, 0x8c, 0xfc, 0x39, 0xde, 0xa0, 0x46, 0x5b, 0x0a, 0x9d, 0xe4, 0x20, + 0xf9, 0x28, 0xa3, 0x15, 0xba, 0x4c, 0xd4, 0xa5, 0xd1, 0xa6, 0xc2, 0x8f, 0x73, 0x6d, 0x9f, 0x80, + 0x5d, 0xe2, 0xa5, 0x02, 0x6b, 0x28, 0x50, 0xa3, 0xed, 0x0d, 0x33, 0x15, 0x79, 0x0c, 0xed, 0x90, + 0x2f, 0x98, 0xf4, 0x5e, 0xf3, 0xa9, 0x88, 0x59, 0x28, 0x9c, 0x3a, 0x9e, 0xf0, 0xbd, 0xf2, 0x09, + 0x1f, 0xad, 0x66, 0xaf, 0xb8, 0x1c, 0xc7, 0x2c, 0xcc, 0x8e, 0xb9, 0x95, 0xc7, 0x28, 0x4c, 0x90, + 0x8f, 0xe1, 0xce, 0x26, 0xc9, 0x9c, 0xfb, 0x92, 0x09, 0xc7, 0xea, 0x18, 0x5d, 0x42, 0x37, 0xb9, + 0xbf, 
0x46, 0x74, 0x8b, 0x88, 0xea, 0x84, 0x03, 0x1d, 0xa3, 0xab, 0x15, 0x44, 0x94, 0x26, 0x94, + 0xac, 0x38, 0x12, 0x5e, 0x49, 0x56, 0xe3, 0xdf, 0xc8, 0xca, 0x63, 0x36, 0xb2, 0x36, 0x49, 0x32, + 0x59, 0xcd, 0x54, 0x56, 0x0e, 0x17, 0xb2, 0x36, 0xc4, 0x4c, 0x56, 0x2b, 0x95, 0x95, 0xc3, 0x99, + 0xac, 0xaf, 0x00, 0x12, 0x2e, 0xb8, 0x9c, 0x2e, 0xd5, 0xe9, 0xb7, 0xb1, 0xc7, 0x1f, 0x96, 0x25, + 0x6d, 0xea, 0xa7, 0x47, 0x15, 0xef, 0xcc, 0x0b, 0x25, 0xb5, 0x92, 0x7c, 0xb9, 0x5d, 0x80, 0x77, + 0xde, 0x2e, 0xc0, 0xcf, 0xc1, 0xda, 0x44, 0x6d, 0x77, 0x6a, 0x0d, 0x8c, 0x97, 0xfd, 0xb1, 0xad, + 0x11, 0x13, 0xf4, 0xe1, 0xc8, 0xd6, 0x8b, 0x6e, 0x35, 0x8e, 0x6a, 0x50, 0x45, 0xcd, 0x47, 0x4d, + 0x80, 0xe2, 0xda, 0xdd, 0x67, 0x00, 0xc5, 0xf9, 0xa8, 0xca, 0x8b, 0xae, 0xae, 0x04, 0x4f, 0x4b, + 0x79, 0x97, 0x66, 0x96, 0xc2, 0x7d, 0x1e, 0x2e, 0xe4, 0x12, 0x2b, 0xb8, 0x45, 0x33, 0xcb, 0xfd, + 0x4b, 0x03, 0x98, 0x78, 0x01, 0x1f, 0xf3, 0xc4, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x09, 0xd4, 0x04, + 0xb6, 0xbe, 0x70, 0x74, 0x8c, 0x20, 0xe5, 0x88, 0x74, 0x2a, 0x64, 0x21, 0x39, 0x91, 0x7c, 0x01, + 0x16, 0xcf, 0x1a, 0x5e, 0x38, 0x06, 0x46, 0xed, 0x95, 0xa3, 0xf2, 0x69, 0x90, 0xc5, 0x15, 0x64, + 0xf2, 0x25, 0xc0, 0x32, 0x3f, 0x78, 0xe1, 0x54, 0x30, 0xf4, 0xee, 0x3b, 0xaf, 0x25, 0x8b, 0x2d, + 0xd1, 0xdd, 0xc7, 0x50, 0xc5, 0x2f, 0x50, 0xd3, 0x13, 0x27, 0xae, 0x96, 0x4e, 0x4f, 0xb5, 0xde, + 0x9e, 0x23, 0x56, 0x36, 0x47, 0xdc, 0xa7, 0x60, 0x3e, 0x4f, 0xbf, 0xf3, 0x7d, 0x0f, 0xc6, 0xfd, + 0x49, 0x83, 0x26, 0xe2, 0x03, 0x26, 0x67, 0x4b, 0x9e, 0x90, 0xc7, 0x5b, 0x0f, 0xc6, 0x83, 0x5b, + 0xf1, 0x19, 0xaf, 0x57, 0x7a, 0x28, 0x72, 0xa1, 0xfa, 0xbb, 0x84, 0x1a, 0x65, 0xa1, 0x5d, 0xa8, + 0xe0, 0xd8, 0x37, 0x41, 0xef, 0xbf, 0x48, 0xeb, 0x68, 0xd8, 0x7f, 0x91, 0xd6, 0x11, 0x55, 0xa3, + 0x5e, 0x01, 0xb4, 0x6f, 0x1b, 0xee, 0x2f, 0x9a, 0x2a, 0x3e, 0x36, 0x57, 0xb5, 0x27, 0xc8, 0xff, + 0xa0, 0x26, 0x24, 0x8f, 0xa7, 0x81, 0x40, 0x5d, 0x06, 0x35, 0x95, 0x39, 0x10, 0x6a, 0xeb, 0xab, + 0x55, 0x38, 0xcb, 0xb7, 0x56, 0x6b, 0xf2, 0x7f, 0xa8, 0x0b, 0xc9, 0x12, 0xa9, 0xd8, 0xe9, 0x50, + 0xad, 0xa1, 0x3d, 0x10, 0xe4, 0x2e, 0x98, 0x3c, 0x9c, 0x4f, 0xf1, 0x52, 0x94, 0xa3, 0xca, 0xc3, + 0xf9, 0x40, 0x90, 0xfb, 0x50, 0x5f, 0x24, 0xd1, 0x2a, 0xf6, 0xc2, 0x85, 0x53, 0xed, 0x18, 0x5d, + 0x8b, 0x6e, 0x6c, 0xd2, 0x06, 0xfd, 0x72, 0x8d, 0x83, 0xad, 0x4e, 0xf5, 0xcb, 0xb5, 0xca, 0x9e, + 0xb0, 0x70, 0xc1, 0x55, 0x92, 0x5a, 0x9a, 0x1d, 0xed, 0x81, 0x70, 0x7f, 0xd5, 0xa0, 0x7a, 0xbc, + 0x5c, 0x85, 0xaf, 0xc8, 0x3e, 0x34, 0x02, 0x2f, 0x9c, 0xaa, 0x56, 0x2a, 0x34, 0x5b, 0x81, 0x17, + 0xaa, 0x1a, 0x1e, 0x08, 0xf4, 0xb3, 0xeb, 0x8d, 0x3f, 0x7b, 0x6b, 0x02, 0x76, 0x9d, 0xf9, 0x7b, + 0xd9, 0x25, 0x18, 0x78, 0x09, 0xf7, 0xcb, 0x97, 0x80, 0x1b, 0xf4, 0xfa, 0xe1, 0x2c, 0x9a, 0x7b, + 0xe1, 0xa2, 0xb8, 0x01, 0xf5, 0x86, 0xe3, 0x57, 0x35, 0x29, 0xae, 0xdd, 0x03, 0xa8, 0xe7, 0xac, + 0x5b, 0xcd, 0xfb, 0xdd, 0x48, 0x3d, 0xb1, 0x5b, 0xef, 0xaa, 0xee, 0xfe, 0x00, 0x2d, 0x4c, 0xce, + 0xe7, 0xff, 0xb5, 0xcb, 0x0e, 0xc0, 0x9c, 0xa9, 0x0c, 0x79, 0x93, 0xed, 0xde, 0x12, 0x9e, 0x07, + 0xa4, 0xb4, 0xa3, 0xbd, 0x37, 0x37, 0xfb, 0xda, 0xef, 0x37, 0xfb, 0xda, 0x9f, 0x37, 0xfb, 0xda, + 0xf7, 0xa6, 0x62, 0xc7, 0x97, 0x97, 0x26, 0xfe, 0xcd, 0x7c, 0xf6, 0x77, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x53, 0x09, 0xe5, 0x37, 0xfe, 0x08, 0x00, 0x00, } func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { @@ -2903,7 +2903,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + m.NegativeSpans = 
append(m.NegativeSpans, BucketSpan{}) if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -3069,7 +3069,7 @@ func (m *Histogram) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index e6a1e107c9e..57216b81d9e 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -86,9 +86,9 @@ message Histogram { uint64 zero_count_int = 6; double zero_count_float = 7; } - + // Negative Buckets. - repeated BucketSpan negative_spans = 8; + repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; // Use either "negative_deltas" or "negative_counts", the former for // regular histograms with integer counts, the latter for float // histograms. @@ -96,7 +96,7 @@ message Histogram { repeated double negative_counts = 10; // Absolute count of each bucket. // Positive Buckets. - repeated BucketSpan positive_spans = 11; + repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; // Use either "positive_deltas" or "positive_counts", the former for // regular histograms with integer counts, the latter for float // histograms. @@ -107,7 +107,7 @@ message Histogram { // timestamp is in ms format, see model/timestamp/timestamp.go for // conversion from time.Time to Prometheus timestamp. int64 timestamp = 15; -} +} // A BucketSpan defines a number of consecutive buckets with their // offset. Logically, it would be more straightforward to include the diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index 4db976e979b..91904dda25a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -82,7 +82,7 @@ func (s Series) String() string { func (s Series) MarshalJSON() ([]byte, error) { // Note that this is rather inefficient because it re-creates the whole // series, just separated by Histogram Points and Value Points. For API - // purposes, there is a more efficcient jsoniter implementation in + // purposes, there is a more efficient jsoniter implementation in // web/api/v1/api.go. series := struct { M labels.Labels `json:"metric"` diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index b87733f5111..2f4de0f3d9d 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -322,6 +322,8 @@ const resolvedRetention = 15 * time.Minute // Eval evaluates the rule expression and then creates pending alerts and fires // or removes previously pending alerts accordingly. 
func (r *AlertingRule) Eval(ctx context.Context, evalDelay time.Duration, ts time.Time, query QueryFunc, externalURL *url.URL, limit int) (promql.Vector, error) { + ctx = NewOriginContext(ctx, NewRuleDetail(r)) + res, err := query(ctx, r.vector.String(), ts.Add(-evalDelay)) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/rules/origin.go b/vendor/github.com/prometheus/prometheus/rules/origin.go new file mode 100644 index 00000000000..996538767d5 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/rules/origin.go @@ -0,0 +1,69 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "context" + "fmt" + + "github.com/prometheus/prometheus/model/labels" +) + +type ruleOrigin struct{} + +// RuleDetail contains information about the rule that is being evaluated. +type RuleDetail struct { + Name string + Query string + Labels labels.Labels + Kind string +} + +const ( + KindAlerting = "alerting" + KindRecording = "recording" +) + +// NewRuleDetail creates a RuleDetail from a given Rule. +func NewRuleDetail(r Rule) RuleDetail { + var kind string + switch r.(type) { + case *AlertingRule: + kind = KindAlerting + case *RecordingRule: + kind = KindRecording + default: + panic(fmt.Sprintf(`unknown rule type "%T"`, r)) + } + + return RuleDetail{ + Name: r.Name(), + Query: r.Query().String(), + Labels: r.Labels(), + Kind: kind, + } +} + +// NewOriginContext returns a new context with data about the origin attached. +func NewOriginContext(ctx context.Context, rule RuleDetail) context.Context { + return context.WithValue(ctx, ruleOrigin{}, rule) +} + +// FromOriginContext returns the RuleDetail origin data from the context. +func FromOriginContext(ctx context.Context) RuleDetail { + if rule, ok := ctx.Value(ruleOrigin{}).(RuleDetail); ok { + return rule + } + return RuleDetail{} +} diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go index 8e3bd666688..a143171c0bc 100644 --- a/vendor/github.com/prometheus/prometheus/rules/recording.go +++ b/vendor/github.com/prometheus/prometheus/rules/recording.go @@ -73,6 +73,8 @@ func (rule *RecordingRule) Labels() labels.Labels { // Eval evaluates the rule and then overrides the metric names and labels accordingly. 
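The new rules/origin.go above, together with the one-line additions to AlertingRule.Eval and RecordingRule.Eval (next hunk), threads the identity of the rule being evaluated through the query context. A minimal sketch of reading that metadata back out, for example to log per-rule query activity: the wrapper function is hypothetical, while rules.QueryFunc, promql.Vector, and the go-kit logger are types this package already uses (assumes imports of context, time, github.com/go-kit/log, github.com/go-kit/log/level, and the promql and rules packages):

func loggingQueryFunc(next rules.QueryFunc, logger log.Logger) rules.QueryFunc {
	return func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
		// FromOriginContext returns a zero-valued RuleDetail when no
		// origin was attached to the context.
		if detail := rules.FromOriginContext(ctx); detail.Name != "" {
			level.Debug(logger).Log("msg", "evaluating", "rule", detail.Name, "kind", detail.Kind)
		}
		return next(ctx, q, t)
	}
}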
func (rule *RecordingRule) Eval(ctx context.Context, evalDelay time.Duration, ts time.Time, query QueryFunc, _ *url.URL, limit int) (promql.Vector, error) { + ctx = NewOriginContext(ctx, NewRuleDetail(rule)) + vector, err := query(ctx, rule.vector.String(), ts.Add(-evalDelay)) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index e0a7102850f..69a0eaa1f7d 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -270,8 +270,13 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { m.mtxScrape.Lock() defer m.mtxScrape.Unlock() + scfgs, err := cfg.GetScrapeConfigs() + if err != nil { + return err + } + c := make(map[string]*config.ScrapeConfig) - for _, scfg := range cfg.ScrapeConfigs { + for _, scfg := range scfgs { c[scfg.JobName] = scfg } m.scrapeConfigs = c diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 562b376caa0..a92bd22cb27 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -328,7 +328,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed options.PassMetadataInContext, ) } - + targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) return sp, nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 36bff282163..e3ef58c3519 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -559,7 +559,7 @@ func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogr } } -func spansProtoToSpans(s []*prompb.BucketSpan) []histogram.Span { +func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span { spans := make([]histogram.Span, len(s)) for i := 0; i < len(s); i++ { spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} @@ -600,10 +600,10 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra } } -func spansToSpansProto(s []histogram.Span) []*prompb.BucketSpan { - spans := make([]*prompb.BucketSpan, len(s)) +func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan { + spans := make([]prompb.BucketSpan, len(s)) for i := 0; i < len(s); i++ { - spans[i] = &prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length} } return spans diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 30c0750a16d..7ea81ae8fce 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -894,6 +894,10 @@ func (t *QueueManager) releaseLabels(ls labels.Labels) { // processExternalLabels merges externalLabels into ls. If ls contains // a label in externalLabels, the value in ls wins. 
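The hunk that follows adds an early return to processExternalLabels when no external labels are configured, skipping the ScratchBuilder merge pass in that common case. For reference, a sketch of the merge semantics documented above, written as if called from inside the remote package; the values are illustrative, and I assume the usual invariant that both inputs are sorted by label name:

ls := labels.FromStrings("cluster", "prod", "job", "node")
ext := []labels.Label{
	{Name: "cluster", Value: "default"}, // present in ls, so the ls value wins
	{Name: "region", Value: "eu"},       // absent from ls, so it is added
}
merged := processExternalLabels(ls, ext)
// merged is {cluster="prod", job="node", region="eu"}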
func processExternalLabels(ls labels.Labels, externalLabels []labels.Label) labels.Labels {
+	if len(externalLabels) == 0 {
+		return ls
+	}
+
 	b := labels.NewScratchBuilder(ls.Len() + len(externalLabels))
 	j := 0
 	ls.Range(func(l labels.Label) {
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go
index c39b0786da4..a8d53a3ab6b 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/db.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go
@@ -131,6 +131,10 @@ type Options struct {
 	// WALCompression will turn on Snappy compression for records on the WAL.
 	WALCompression bool
 
+	// Maximum number of CPUs that can simultaneously process WAL replay.
+	// If it is <=0, then GOMAXPROCS is used.
+	WALReplayConcurrency int
+
 	// StripeSize is the size in entries of the series hash map. Reducing the size will save memory but impact performance.
 	StripeSize int
 
@@ -813,6 +817,9 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
 	headOpts.PostingsForMatchersCacheTTL = opts.HeadPostingsForMatchersCacheTTL
 	headOpts.PostingsForMatchersCacheSize = opts.HeadPostingsForMatchersCacheSize
 	headOpts.PostingsForMatchersCacheForce = opts.HeadPostingsForMatchersCacheForce
+	if opts.WALReplayConcurrency > 0 {
+		headOpts.WALReplayConcurrency = opts.WALReplayConcurrency
+	}
 	if opts.IsolationDisabled {
 		// We only override this flag if isolation is disabled at DB level. We use the default otherwise.
 		headOpts.IsolationDisabled = opts.IsolationDisabled
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go
index b751e8a0cc2..ac166a74951 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/head.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go
@@ -17,8 +17,8 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"math/rand"
 	"path/filepath"
+	"runtime"
 	"sync"
 	"time"
 
@@ -59,6 +59,8 @@ var (
 	// defaultIsolationDisabled is true if isolation is disabled by default.
 	defaultIsolationDisabled = false
+
+	defaultWALReplayConcurrency = runtime.GOMAXPROCS(0)
 )
 
 // chunkDiskMapper is a temporary interface while we transition from
@@ -176,6 +178,11 @@ type HeadOptions struct {
 	PostingsForMatchersCacheTTL   time.Duration
 	PostingsForMatchersCacheSize  int
 	PostingsForMatchersCacheForce bool
+
+	// Maximum number of CPUs that can simultaneously process WAL replay.
+	// The default value is GOMAXPROCS.
+	// If it is set to a negative value or zero, the default value is used.
+ WALReplayConcurrency int } const ( @@ -197,6 +204,7 @@ func DefaultHeadOptions() *HeadOptions { PostingsForMatchersCacheTTL: defaultPostingsForMatchersCacheTTL, PostingsForMatchersCacheSize: defaultPostingsForMatchersCacheSize, PostingsForMatchersCacheForce: false, + WALReplayConcurrency: defaultWALReplayConcurrency, } ho.OutOfOrderCapMax.Store(DefaultOutOfOrderCapMax) return ho @@ -274,6 +282,10 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea opts.ChunkPool = chunkenc.NewPool() } + if opts.WALReplayConcurrency <= 0 { + opts.WALReplayConcurrency = defaultWALReplayConcurrency + } + h.chunkDiskMapper, err = chunks.NewChunkDiskMapper( r, mmappedChunksDir(opts.ChunkDirRoot), @@ -530,6 +542,17 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }, func() float64 { return float64(h.iso.lastAppendID()) }), + prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_head_chunks_storage_size_bytes", + Help: "Size of the chunks_head directory.", + }, func() float64 { + val, err := h.chunkDiskMapper.Size() + if err != nil { + level.Error(h.logger).Log("msg", "Failed to calculate size of \"chunks_head\" dir", + "err", err.Error()) + } + return float64(val) + }), ) } return m @@ -596,20 +619,47 @@ func (h *Head) Init(minValidTime int64) error { if h.opts.EnableMemorySnapshotOnShutdown { level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") - var err error - snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot() - if err != nil { - snapIdx, snapOffset = -1, 0 - refSeries = make(map[chunks.HeadSeriesRef]*memSeries) + // If there are any WAL files, there should be at least one WAL file with an index that is current or newer + // than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed + // to be outdated. + loadSnapshot := true + if h.wal != nil { + _, endAt, err := wlog.Segments(h.wal.Dir()) + if err != nil { + return errors.Wrap(err, "finding WAL segments") + } - h.metrics.snapshotReplayErrorTotal.Inc() - level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err) - // We clear the partially loaded data to replay fresh from the WAL. - if err := h.resetInMemoryState(); err != nil { - return err + _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) + if err != nil && err != record.ErrNotFound { + level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) + } + + if err == nil && endAt < idx { + loadSnapshot = false + level.Warn(h.logger).Log("msg", "Last WAL file is behind snapshot, removing snapshots") + if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil { + level.Error(h.logger).Log("msg", "Error while deleting snapshot directories", "err", err) + } + } + } + if loadSnapshot { + var err error + snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot() + if err == nil { + level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String()) + } + if err != nil { + snapIdx, snapOffset = -1, 0 + refSeries = make(map[chunks.HeadSeriesRef]*memSeries) + + h.metrics.snapshotReplayErrorTotal.Inc() + level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err) + // We clear the partially loaded data to replay fresh from the WAL. 
+ if err := h.resetInMemoryState(); err != nil { + return err + } } } - level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String()) } mmapChunkReplayStart := time.Now() @@ -2073,93 +2123,3 @@ func (h *Head) updateWALReplayStatusRead(current int) { h.stats.WALReplayStatus.Current = current } - -func GenerateTestHistograms(n int) (r []*histogram.Histogram) { - for i := 0; i < n; i++ { - h := GenerateTestHistogram(i) - if i > 0 { - h.CounterResetHint = histogram.NotCounterReset - } - r = append(r, h) - } - return r -} - -// Generates a test histogram, it is up to the user to set any known counter reset hint. -func GenerateTestHistogram(i int) *histogram.Histogram { - return &histogram.Histogram{ - Count: 10 + uint64(i*8), - ZeroCount: 2 + uint64(i), - ZeroThreshold: 0.001, - Sum: 18.4 * float64(i+1), - Schema: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []int64{int64(i + 1), 1, -1, 0}, - } -} - -func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { - for x := 0; x < n; x++ { - r = append(r, GenerateTestGaugeHistogram(rand.Intn(n))) - } - return r -} - -func GenerateTestGaugeHistogram(i int) *histogram.Histogram { - h := GenerateTestHistogram(i) - h.CounterResetHint = histogram.GaugeType - return h -} - -func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { - for i := 0; i < n; i++ { - h := GenerateTestFloatHistogram(i) - if i > 0 { - h.CounterResetHint = histogram.NotCounterReset - } - r = append(r, h) - } - return r -} - -// Generates a test float histogram, it is up to the user to set any known counter reset hint. 
-func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { - return &histogram.FloatHistogram{ - Count: 10 + float64(i*8), - ZeroCount: 2 + float64(i), - ZeroThreshold: 0.001, - Sum: 18.4 * float64(i+1), - Schema: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)}, - } -} - -func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { - for x := 0; x < n; x++ { - r = append(r, GenerateTestGaugeFloatHistogram(rand.Intn(n))) - } - return r -} - -func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { - h := GenerateTestFloatHistogram(i) - h.CounterResetHint = histogram.GaugeType - return h -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index fe42f282064..c27930cf3b1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -333,12 +333,10 @@ func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { s.Unlock() return &safeChunk{ - Chunk: c.chunk, - s: s, - cid: cid, - isoState: h.isoState, - chunkDiskMapper: h.head.chunkDiskMapper, - memChunkPool: &h.head.memChunkPool, + Chunk: c.chunk, + s: s, + cid: cid, + isoState: h.isoState, }, nil } @@ -629,43 +627,24 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType { // safeChunk makes sure that the chunk can be accessed without a race condition type safeChunk struct { chunkenc.Chunk - s *memSeries - cid chunks.HeadChunkID - isoState *isolationState - chunkDiskMapper chunkDiskMapper - memChunkPool *sync.Pool + s *memSeries + cid chunks.HeadChunkID + isoState *isolationState } func (c *safeChunk) Iterator(reuseIter chunkenc.Iterator) chunkenc.Iterator { c.s.Lock() - it := c.s.iterator(c.cid, c.isoState, c.chunkDiskMapper, c.memChunkPool, reuseIter) + it := c.s.iterator(c.cid, c.Chunk, c.isoState, reuseIter) c.s.Unlock() return it } // iterator returns a chunk iterator for the requested chunkID, or a NopIterator if the requested ID is out of range. // It is unsafe to call this concurrently with s.append(...) without holding the series lock. -func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, chunkDiskMapper chunkDiskMapper, memChunkPool *sync.Pool, it chunkenc.Iterator) chunkenc.Iterator { - c, garbageCollect, err := s.chunk(id, chunkDiskMapper, memChunkPool) - // TODO(fabxc): Work around! An error will be returns when a querier have retrieved a pointer to a - // series's chunk, which got then garbage collected before it got - // accessed. We must ensure to not garbage collect as long as any - // readers still hold a reference. - if err != nil { - return chunkenc.NewNopIterator() - } - defer func() { - if garbageCollect { - // Set this to nil so that Go GC can collect it after it has been used. - // This should be done always at the end. 
- c.chunk = nil - memChunkPool.Put(c) - } - }() - +func (s *memSeries) iterator(id chunks.HeadChunkID, c chunkenc.Chunk, isoState *isolationState, it chunkenc.Iterator) chunkenc.Iterator { ix := int(id) - int(s.firstChunkID) - numSamples := c.chunk.NumSamples() + numSamples := c.NumSamples() stopAfter := numSamples if isoState != nil && !isoState.IsolationDisabled() { @@ -710,9 +689,9 @@ func (s *memSeries) iterator(id chunks.HeadChunkID, isoState *isolationState, ch return chunkenc.NewNopIterator() } if stopAfter == numSamples { - return c.chunk.Iterator(it) + return c.Iterator(it) } - return makeStopIterator(c.chunk, it, stopAfter) + return makeStopIterator(c, it, stopAfter) } // stopIterator wraps an Iterator, but only returns the first diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index f631b4fc4b3..1053fc4a9e9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -18,7 +18,6 @@ import ( "math" "os" "path/filepath" - "runtime" "strconv" "strings" "sync" @@ -65,13 +64,13 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. // Start workers that each process samples for a partition of the series ID space. var ( wg sync.WaitGroup - n = runtime.GOMAXPROCS(0) - processors = make([]walSubsetProcessor, n) + concurrency = h.opts.WALReplayConcurrency + processors = make([]walSubsetProcessor, concurrency) exemplarsInput chan record.RefExemplar dec record.Decoder - shards = make([][]record.RefSample, n) - histogramShards = make([][]histogramRecord, n) + shards = make([][]record.RefSample, concurrency) + histogramShards = make([][]histogramRecord, concurrency) decoded = make(chan interface{}, 10) decodeErr, seriesCreationErr error @@ -116,7 +115,7 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. // For CorruptionErr ensure to terminate all workers before exiting. _, ok := err.(*wlog.CorruptionErr) if ok || seriesCreationErr != nil { - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { processors[i].closeAndDrain() } close(exemplarsInput) @@ -124,8 +123,8 @@ func (h *Head) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. } }() - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { processors[i].setup() go func(wp *walSubsetProcessor) { @@ -276,7 +275,7 @@ Outer: multiRef[walSeries.Ref] = mSeries.ref } - idx := uint64(mSeries.ref) % uint64(n) + idx := uint64(mSeries.ref) % uint64(concurrency) processors[idx].input <- walSubsetProcessorInputItem{walSeriesRef: walSeries.Ref, existingSeries: mSeries} } //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. 
@@ -293,7 +292,7 @@ Outer: if len(samples) < m { m = len(samples) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { if shards[i] == nil { shards[i] = processors[i].reuseBuf() } @@ -305,10 +304,10 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - mod := uint64(sam.Ref) % uint64(n) + mod := uint64(sam.Ref) % uint64(concurrency) shards[mod] = append(shards[mod], sam) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { if len(shards[i]) > 0 { processors[i].input <- walSubsetProcessorInputItem{samples: shards[i]} shards[i] = nil @@ -351,7 +350,7 @@ Outer: if len(samples) < m { m = len(samples) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { if histogramShards[i] == nil { histogramShards[i] = processors[i].reuseHistogramBuf() } @@ -363,10 +362,10 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - mod := uint64(sam.Ref) % uint64(n) + mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H}) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { if len(histogramShards[i]) > 0 { processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]} histogramShards[i] = nil @@ -388,7 +387,7 @@ Outer: if len(samples) < m { m = len(samples) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { if histogramShards[i] == nil { histogramShards[i] = processors[i].reuseHistogramBuf() } @@ -400,10 +399,10 @@ Outer: if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - mod := uint64(sam.Ref) % uint64(n) + mod := uint64(sam.Ref) % uint64(concurrency) histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH}) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { if len(histogramShards[i]) > 0 { processors[i].input <- walSubsetProcessorInputItem{histogramSamples: histogramShards[i]} histogramShards[i] = nil @@ -444,7 +443,7 @@ Outer: } // Signal termination to each worker and wait for it to close its output channel. - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { processors[i].closeAndDrain() } close(exemplarsInput) @@ -687,12 +686,12 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. lastSeq, lastOff := lastMmapRef.Unpack() // Start workers that each process samples for a partition of the series ID space. var ( - wg sync.WaitGroup - n = runtime.GOMAXPROCS(0) - processors = make([]wblSubsetProcessor, n) + wg sync.WaitGroup + concurrency = h.opts.WALReplayConcurrency + processors = make([]wblSubsetProcessor, concurrency) dec record.Decoder - shards = make([][]record.RefSample, n) + shards = make([][]record.RefSample, concurrency) decodedCh = make(chan interface{}, 10) decodeErr error @@ -714,15 +713,15 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. _, ok := err.(*wlog.CorruptionErr) if ok { err = &errLoadWbl{err: err} - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { processors[i].closeAndDrain() } wg.Wait() } }() - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { processors[i].setup() go func(wp *wblSubsetProcessor) { @@ -781,17 +780,17 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. 
if len(samples) < m { m = len(samples) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { shards[i] = processors[i].reuseBuf() } for _, sam := range samples[:m] { if r, ok := multiRef[sam.Ref]; ok { sam.Ref = r } - mod := uint64(sam.Ref) % uint64(n) + mod := uint64(sam.Ref) % uint64(concurrency) shards[mod] = append(shards[mod], sam) } - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { processors[i].input <- shards[i] } samples = samples[m:] @@ -818,7 +817,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. mmapMarkerUnknownRefs.Inc() continue } - idx := uint64(ms.ref) % uint64(n) + idx := uint64(ms.ref) % uint64(concurrency) // It is possible that some old sample is being processed in processWALSamples that // could cause race below. So we wait for the goroutine to empty input the buffer and finish // processing all old samples after emptying the buffer. @@ -847,7 +846,7 @@ func (h *Head) loadWBL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks. } // Signal termination to each worker and wait for it to close its output channel. - for i := 0; i < n; i++ { + for i := 0; i < concurrency; i++ { processors[i].closeAndDrain() } wg.Wait() @@ -1383,18 +1382,18 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie var ( numSeries = 0 unknownRefs = int64(0) - n = runtime.GOMAXPROCS(0) + concurrency = h.opts.WALReplayConcurrency wg sync.WaitGroup - recordChan = make(chan chunkSnapshotRecord, 5*n) - shardedRefSeries = make([]map[chunks.HeadSeriesRef]*memSeries, n) - errChan = make(chan error, n) + recordChan = make(chan chunkSnapshotRecord, 5*concurrency) + shardedRefSeries = make([]map[chunks.HeadSeriesRef]*memSeries, concurrency) + errChan = make(chan error, concurrency) refSeries map[chunks.HeadSeriesRef]*memSeries exemplarBuf []record.RefExemplar dec record.Decoder ) - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { go func(idx int, rc <-chan chunkSnapshotRecord) { defer wg.Done() defer func() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/test.txt b/vendor/github.com/prometheus/prometheus/tsdb/test.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go index 87cc345dd0f..f9981ffe16d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/chunks.go @@ -72,6 +72,8 @@ func ChunkFromSamplesGeneric(s Samples) chunks.Meta { ca.Append(s.Get(i).T(), s.Get(i).V()) case chunkenc.ValHistogram: ca.AppendHistogram(s.Get(i).T(), s.Get(i).H()) + case chunkenc.ValFloatHistogram: + ca.AppendFloatHistogram(s.Get(i).T(), s.Get(i).FH()) default: panic(fmt.Sprintf("unknown sample type %s", sampleType.String())) } @@ -128,12 +130,18 @@ func PopulatedChunk(numSamples int, minTime int64) chunks.Meta { // GenerateSamples starting at start and counting up numSamples. 
func GenerateSamples(start, numSamples int) []Sample { - samples := make([]Sample, 0, numSamples) - for i := start; i < start+numSamples; i++ { - samples = append(samples, sample{ + return generateSamples(start, numSamples, func(i int) Sample { + return sample{ t: int64(i), v: float64(i), - }) + } + }) +} + +func generateSamples(start, numSamples int, gen func(int) Sample) []Sample { + samples := make([]Sample, 0, numSamples) + for i := start; i < start+numSamples; i++ { + samples = append(samples, gen(i)) } return samples } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go new file mode 100644 index 00000000000..3c276c84119 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -0,0 +1,110 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdbutil + +import ( + "math/rand" + + "github.com/prometheus/prometheus/model/histogram" +) + +func GenerateTestHistograms(n int) (r []*histogram.Histogram) { + for i := 0; i < n; i++ { + h := GenerateTestHistogram(i) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + +// GenerateTestHistogram but it is up to the user to set any known counter reset hint. +func GenerateTestHistogram(i int) *histogram.Histogram { + return &histogram.Histogram{ + Count: 10 + uint64(i*8), + ZeroCount: 2 + uint64(i), + ZeroThreshold: 0.001, + Sum: 18.4 * float64(i+1), + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []int64{int64(i + 1), 1, -1, 0}, + } +} + +func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { + for x := 0; x < n; x++ { + r = append(r, GenerateTestGaugeHistogram(rand.Intn(n))) + } + return r +} + +func GenerateTestGaugeHistogram(i int) *histogram.Histogram { + h := GenerateTestHistogram(i) + h.CounterResetHint = histogram.GaugeType + return h +} + +func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { + for i := 0; i < n; i++ { + h := GenerateTestFloatHistogram(i) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + +// GenerateTestFloatHistogram but it is up to the user to set any known counter reset hint. 
+func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { + return &histogram.FloatHistogram{ + Count: 10 + float64(i*8), + ZeroCount: 2 + float64(i), + ZeroThreshold: 0.001, + Sum: 18.4 * float64(i+1), + Schema: 1, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)}, + NegativeSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + NegativeBuckets: []float64{float64(i + 1), float64(i + 2), float64(i + 1), float64(i + 1)}, + } +} + +func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { + for x := 0; x < n; x++ { + r = append(r, GenerateTestGaugeFloatHistogram(rand.Intn(n))) + } + return r +} + +func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { + h := GenerateTestFloatHistogram(i) + h.CounterResetHint = histogram.GaugeType + return h +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index 5ae308d4eac..df8bab53ffe 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -199,9 +199,10 @@ type wlMetrics struct { truncateTotal prometheus.Counter currentSegment prometheus.Gauge writesFailed prometheus.Counter + walFileSize prometheus.GaugeFunc } -func newWLMetrics(r prometheus.Registerer) *wlMetrics { +func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { m := &wlMetrics{} m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ @@ -233,6 +234,17 @@ func newWLMetrics(r prometheus.Registerer) *wlMetrics { Name: "writes_failed_total", Help: "Total number of write log writes that failed.", }) + m.walFileSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "storage_size_bytes", + Help: "Size of the write log directory.", + }, func() float64 { + val, err := w.Size() + if err != nil { + level.Error(w.logger).Log("msg", "Failed to calculate size of \"wal\" dir", + "err", err.Error()) + } + return float64(val) + }) if r != nil { r.MustRegister( @@ -243,6 +255,7 @@ func newWLMetrics(r prometheus.Registerer) *wlMetrics { m.truncateTotal, m.currentSegment, m.writesFailed, + m.walFileSize, ) } @@ -279,7 +292,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi if filepath.Base(dir) == WblDirName { prefix = "prometheus_tsdb_out_of_order_wbl_" } - w.metrics = newWLMetrics(prometheus.WrapRegistererWithPrefix(prefix, reg)) + w.metrics = newWLMetrics(w, prometheus.WrapRegistererWithPrefix(prefix, reg)) _, last, err := Segments(w.Dir()) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go index 5d62bac9c9f..8cdd7d48302 100644 --- a/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go +++ b/vendor/github.com/prometheus/prometheus/util/strutil/strconv.go @@ -16,6 +16,7 @@ package strutil import ( "fmt" "net/url" + "strings" "github.com/grafana/regexp" ) @@ -38,6 +39,26 @@ func GraphLinkForExpression(expr string) string { // SanitizeLabelName replaces anything that doesn't match // client_label.LabelNameRE with an underscore. +// Note: this does not handle all Prometheus label name restrictions (such as +// not starting with a digit 0-9), and hence should only be used if the label +// name is prefixed with a known valid string. 
func SanitizeLabelName(name string) string { return invalidLabelCharRE.ReplaceAllString(name, "_") } + +// SanitizeFullLabelName replaces any invalid character with an underscore, and +// if given an empty string, returns a string containing a single underscore. +func SanitizeFullLabelName(name string) string { + if len(name) == 0 { + return "_" + } + var validSb strings.Builder + for i, b := range name { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + validSb.WriteRune('_') + } else { + validSb.WriteRune(b) + } + } + return validSb.String() +} diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index cfac908fe8b..d8ee24d6c96 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -421,7 +421,10 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { defer cancel() } - opts := extractQueryOpts(r) + opts, err := extractQueryOpts(r) + if err != nil { + return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + } qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, opts, r.FormValue("query"), ts) if err != nil { return invalidParamError(err, "query") @@ -466,10 +469,18 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } -func extractQueryOpts(r *http.Request) *promql.QueryOpts { - return &promql.QueryOpts{ +func extractQueryOpts(r *http.Request) (*promql.QueryOpts, error) { + opts := &promql.QueryOpts{ EnablePerStepStats: r.FormValue("stats") == "all", } + if strDuration := r.FormValue("lookback_delta"); strDuration != "" { + duration, err := parseDuration(strDuration) + if err != nil { + return nil, fmt.Errorf("error parsing lookback delta duration: %w", err) + } + opts.LookbackDelta = duration + } + return opts, nil } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { @@ -513,7 +524,10 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { defer cancel() } - opts := extractQueryOpts(r) + opts, err := extractQueryOpts(r) + if err != nil { + return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + } qry, err := api.QueryEngine.NewRangeQuery(api.Queryable, opts, r.FormValue("query"), start, end, step) if err != nil { return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} diff --git a/vendor/go.uber.org/goleak/CHANGELOG.md b/vendor/go.uber.org/goleak/CHANGELOG.md index 761db2caa55..530f0a573f6 100644 --- a/vendor/go.uber.org/goleak/CHANGELOG.md +++ b/vendor/go.uber.org/goleak/CHANGELOG.md @@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.2.1] +### Changed +- Drop golang/x/lint dependency. + +[1.2.1]: https://github.com/uber-go/goleak/compare/v1.2.0...v1.2.1 + ## [1.2.0] ### Added - Add Cleanup option that can be used for registering cleanup callbacks. (#78) @@ -47,5 +53,7 @@ Thanks to @denis-tingajkin for their contributions to this release. [1.0.0]: https://github.com/uber-go/goleak/compare/v0.10.0...v1.0.0 -## 0.10.0 +## [0.10.0] - Initial release. 
+ +[0.10.0]: https://github.com/uber-go/goleak/compare/v0.10.0...HEAD \ No newline at end of file diff --git a/vendor/go.uber.org/goleak/Makefile b/vendor/go.uber.org/goleak/Makefile index 53763fa8d11..8dbf7226568 100644 --- a/vendor/go.uber.org/goleak/Makefile +++ b/vendor/go.uber.org/goleak/Makefile @@ -1,6 +1,6 @@ export GOBIN ?= $(shell pwd)/bin -GOLINT = $(GOBIN)/golint +REVIVE = $(GOBIN)/revive GO_FILES := $(shell \ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ @@ -24,18 +24,18 @@ cover: go test -race -coverprofile=cover.out -coverpkg=./... ./... go tool cover -html=cover.out -o cover.html -$(GOLINT): - go install golang.org/x/lint/golint +$(REVIVE): + cd tools && go install github.com/mgechev/revive .PHONY: lint -lint: $(GOLINT) +lint: $(REVIVE) @rm -rf lint.log @echo "Checking formatting..." @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log @echo "Checking vet..." @go vet ./... 2>&1 | tee -a lint.log @echo "Checking lint..." - @$(GOLINT) ./... 2>&1 | tee -a lint.log + @$(REVIVE) -set_exit_status ./... 2>&1 | tee -a lint.log @echo "Checking for unresolved FIXMEs..." @git grep -i fixme | grep -v -e '^vendor/' -e '^Makefile' | tee -a lint.log @[ ! -s lint.log ] diff --git a/vendor/go.uber.org/goleak/README.md b/vendor/go.uber.org/goleak/README.md index fb92dabc56d..a545b5e7792 100644 --- a/vendor/go.uber.org/goleak/README.md +++ b/vendor/go.uber.org/goleak/README.md @@ -8,7 +8,9 @@ You can use `go get` to get the latest version: `go get -u go.uber.org/goleak` -`goleak` also supports semver releases. It is compatible with Go 1.5+. +`goleak` also supports semver releases. + +Note that go-leak only [supports][release] the two most recent minor versions of Go. ## Quick Start @@ -69,3 +71,4 @@ No breaking changes will be made to exported APIs before 2.0. [ci]: https://github.com/uber-go/goleak/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/goleak/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/goleak +[release]: https://go.dev/doc/devel/release#policy diff --git a/vendor/go.uber.org/goleak/internal/stack/doc.go b/vendor/go.uber.org/goleak/internal/stack/doc.go new file mode 100644 index 00000000000..9179a56549d --- /dev/null +++ b/vendor/go.uber.org/goleak/internal/stack/doc.go @@ -0,0 +1,22 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package stack is used for parsing stacks from `runtime.Stack`. 
+package stack diff --git a/vendor/modules.txt b/vendor/modules.txt index b541d480c0c..d4beca77b8c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -137,7 +137,7 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.44.187 +# github.com/aws/aws-sdk-go v1.44.207 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -790,7 +790,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e => github.com/grafana/mimir-prometheus v0.0.0-20230309083841-242e82b8e667 +# github.com/prometheus/prometheus v1.8.2-0.20220620125440-d7e7b8e04b5e => github.com/grafana/mimir-prometheus v0.0.0-20230309145355-024edcdda34c ## explicit; go 1.18 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1056,7 +1056,7 @@ go.opentelemetry.io/otel/trace # go.uber.org/atomic v1.10.0 ## explicit; go 1.18 go.uber.org/atomic -# go.uber.org/goleak v1.2.0 +# go.uber.org/goleak v1.2.1 ## explicit; go 1.18 go.uber.org/goleak go.uber.org/goleak/internal/stack @@ -1377,7 +1377,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230309083841-242e82b8e667 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20230309145355-024edcdda34c # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # github.com/vimeo/galaxycache => github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e # google.golang.org/grpc => google.golang.org/grpc v1.47.0 From 1be6b7b252521e5f71848d192e6a6ffeeb48f764 Mon Sep 17 00:00:00 2001 From: Yuri Nikolic Date: Fri, 10 Mar 2023 00:51:36 +0100 Subject: [PATCH 2/6] Fixing failing tests --- CHANGELOG.md | 6 +++--- cmd/mimir/config-descriptor.json | 2 +- cmd/mimir/help-all.txt.tmpl | 2 +- .../references/configuration-parameters/index.md | 4 +++- integration/compactor_test.go | 13 ++++++------- integration/util.go | 9 ++++----- pkg/ingester/ingester.go | 16 ++++++++++------ pkg/ingester/shipper.go | 2 +- pkg/mimir/mimir_test.go | 2 +- pkg/storage/tsdb/config.go | 5 ++--- pkg/util/test/histogram.go | 14 +++++++------- 11 files changed, 39 insertions(+), 36 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d26ae23f67..a1fe399be99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ * `cortex_bucket_store_series_get_all_duration_seconds` * `cortex_bucket_store_series_merge_duration_seconds` * [CHANGE] Ingester: changed default value of `-blocks-storage.tsdb.retention-period` from `24h` to `13h`. If you're running Mimir with a custom configuration and you're overriding `-querier.query-store-after` to a value greater than the default `12h` then you should increase `-blocks-storage.tsdb.retention-period` accordingly. #4382 -* [CHANGE] Ingester: the configuration parameter `-blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup` has been deprecated and will be removed in Mimir 2.10. +* [CHANGE] Ingester: the configuration parameter `-blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup` has been deprecated and will be removed in Mimir 2.10. 
#4445 * [FEATURE] Cache: Introduce experimental support for using Redis for results, chunks, index, and metadata caches. #4371 * [ENHANCEMENT] Allow to define service name used for tracing via `JAEGER_SERVICE_NAME` environment variable. #4394 * [ENHANCEMENT] Querier and query-frontend: add experimental, more performant protobuf query result response format enabled with `-query-frontend.query-result-response-format=protobuf`. #4304 #4318 #4375 @@ -19,8 +19,8 @@ * [ENHANCEMENT] Ruler: increased tolerance for missed iterations on alerts, reducing the chances of flapping firing alerts during ruler restarts. #4432 * [ENHANCEMENT] Querier and store-gateway: optimized `.*` and `.+` regular expression label matchers. #4432 * [ENHANCEMENT] Query-frontend: results cache TTL is now configurable by using `-query-frontend.results-cache-ttl` and `-query-frontend.results-cache-ttl-for-out-of-order-time-window` options. These values can also be specified per tenant. Default values are unchanged (7 days and 10 minutes respectively). #4385 -* [BUGFIX] Querier: Streaming remote read will now continue to return multiple chunks per frame after the first frame. #4423 -* [ENHANCEMENT] Ingester: added advanced parameter `-blocks-storage.tsdb.wal-replay-concurrency` representing the maximum number of CPUs used during WAL replay. +* [ENHANCEMENT] Ingester: added advanced configuration parameter `-blocks-storage.tsdb.wal-replay-concurrency` representing the maximum number of CPUs used during WAL replay. #4445 +* [BUGFIX] Querier: Streaming remote read will now continue to return multiple chunks per frame after the first frame. #4423 ### Mixin diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index a872a6f5f1d..f225319ba10 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -7093,7 +7093,7 @@ "kind": "field", "name": "wal_replay_concurrency", "required": false, - "desc": "Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled.", + "desc": "Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. If set to a positive value it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option", "fieldValue": null, "fieldDefaultValue": 0, "fieldFlag": "blocks-storage.tsdb.wal-replay-concurrency", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 221e807f8f0..09d7d43c6c3 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -680,7 +680,7 @@ Usage of ./cmd/mimir/mimir: -blocks-storage.tsdb.wal-compression-enabled True to enable TSDB WAL compression. -blocks-storage.tsdb.wal-replay-concurrency int - Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. + Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. If set to a positive value it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option -blocks-storage.tsdb.wal-segment-size-bytes int TSDB WAL segments files max size (bytes). 
(default 134217728) -common.storage.azure.account-key string diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md index 69ff7cb6f97..46ef2ca2ec9 100644 --- a/docs/sources/mimir/references/configuration-parameters/index.md +++ b/docs/sources/mimir/references/configuration-parameters/index.md @@ -3268,7 +3268,9 @@ tsdb: [wal_segment_size_bytes: | default = 134217728] # (advanced) Maximum number of CPUs that can simultaneously processes WAL - # replay. 0 means disabled. + # replay. 0 means disabled. If set to a positive value it overrides the + # deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup + # option # CLI flag: -blocks-storage.tsdb.wal-replay-concurrency [wal_replay_concurrency: | default = 0] diff --git a/integration/compactor_test.go b/integration/compactor_test.go index 2decf210067..91042daa0f4 100644 --- a/integration/compactor_test.go +++ b/integration/compactor_test.go @@ -20,7 +20,6 @@ import ( "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" @@ -74,16 +73,16 @@ func TestCompactBlocksContainingNativeHistograms(t *testing.T) { Labels: labels.FromStrings("case", "native_histogram", "i", strconv.Itoa(i)), Chunks: []chunks.Meta{ tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ - sample{10, 0, tsdb.GenerateTestHistogram(1), nil}, - sample{20, 0, tsdb.GenerateTestHistogram(2), nil}, + sample{10, 0, tsdbutil.GenerateTestHistogram(1), nil}, + sample{20, 0, tsdbutil.GenerateTestHistogram(2), nil}, }), tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ - sample{30, 0, tsdb.GenerateTestHistogram(3), nil}, - sample{40, 0, tsdb.GenerateTestHistogram(4), nil}, + sample{30, 0, tsdbutil.GenerateTestHistogram(3), nil}, + sample{40, 0, tsdbutil.GenerateTestHistogram(4), nil}, }), tsdbutil.ChunkFromSamples([]tsdbutil.Sample{ - sample{50, 0, tsdb.GenerateTestHistogram(5), nil}, - sample{2*time.Hour.Milliseconds() - 1, 0, tsdb.GenerateTestHistogram(6), nil}, + sample{50, 0, tsdbutil.GenerateTestHistogram(5), nil}, + sample{2*time.Hour.Milliseconds() - 1, 0, tsdbutil.GenerateTestHistogram(6), nil}, }), }, } diff --git a/integration/util.go b/integration/util.go index 9a1f99b037b..85f2fbb9990 100644 --- a/integration/util.go +++ b/integration/util.go @@ -15,15 +15,14 @@ import ( "path/filepath" "time" + "github.com/grafana/e2e" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/storage/remote" - "github.com/prometheus/prometheus/tsdb" - - "github.com/grafana/e2e" + "github.com/prometheus/prometheus/tsdb/tsdbutil" ) var ( @@ -117,11 +116,11 @@ func getTLSFlagsWithPrefix(prefix string, servername string, http bool) map[stri } func GenerateTestHistogram(i int) *histogram.Histogram { - return tsdb.GenerateTestHistograms(i + 1)[i] + return tsdbutil.GenerateTestHistograms(i + 1)[i] } func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { - return tsdb.GenerateTestFloatHistograms(i + 1)[i] + return tsdbutil.GenerateTestFloatHistograms(i + 1)[i] } // explicit decoded version of GenerateTestHistogram and GenerateTestFloatHistogram diff --git 
a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 7038b1db43d..190d36379a5 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -110,7 +110,11 @@ const ( minOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_min_window" maxOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_max_window" - // maximum number of TSDBs present on the file system which can be opened in a single process without walReplayConcurrency + // Maximum number of TSDB users present on the file system which can be opened in a single process + // without concurrency. More precisely, if actual number of TSDB users is lower than this number, + // each TSDB is opened in a single process, while WAL replay is done in WALReplayConcurrency concurrent + // processes. Otherwise, TSDBs are opened in WALReplayConcurrency concurrent processes, while WAL replay + // is done in a single process. maxTSDBOpenWithoutConcurrency = 10 ) @@ -1842,7 +1846,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { queue := make(chan string) group, groupCtx := errgroup.WithContext(ctx) - userIDs, err := i.getAllUsersWithTSDB() + userIDs, err := i.getAllTSDBUserIDs() if err != nil { level.Error(i.logger).Log("msg", "error while finding existing TSDBs", "err", err) return err @@ -1896,7 +1900,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { }) } - // Spawn a goroutine to place on the queue all users with a TSDB found on the filesystem. + // Spawn a goroutine to place all users with a TSDB found on the filesystem in the queue. group.Go(func() error { defer close(queue) @@ -1927,8 +1931,8 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { return nil } -// getAllUsersWithTSDB finds all users with a TSDB on the filesystem. -func (i *Ingester) getAllUsersWithTSDB() (map[string]struct{}, error) { +// getAllTSDBUserIDs finds all users with a TSDB on the filesystem. +func (i *Ingester) getAllTSDBUserIDs() (map[string]struct{}, error) { userIDs := make(map[string]struct{}) walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error { if err != nil { @@ -2242,7 +2246,7 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes userDB.casState(closing, closed) // Only remove user from TSDBState when everything is cleaned up - // This will prevent walReplayConcurrency problems when cortex are trying to open new TSDB - Ie: New request for a given tenant + // This will prevent concurrency problems when cortex are trying to open new TSDB - Ie: New request for a given tenant // came in - while closing the tsdb for the same tenant. // If this happens now, the request will get reject as the push will not be able to acquire the lock as the tsdb will be // in closed state diff --git a/pkg/ingester/shipper.go b/pkg/ingester/shipper.go index 4f71b226bdf..46eef1948e2 100644 --- a/pkg/ingester/shipper.go +++ b/pkg/ingester/shipper.go @@ -112,7 +112,7 @@ func NewShipper( // Sync performs a single synchronization, which ensures all non-compacted local blocks have been uploaded // to the object bucket once. // -// It is not walReplayConcurrency-safe, however it is compactor-safe (running concurrently with compactor is ok). +// It is not concurrency-safe, however it is compactor-safe (running concurrently with compactor is ok). 
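+// Concurrent calls to Sync itself must therefore be serialized by the caller.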
func (s *Shipper) Sync(ctx context.Context) (shipped int, err error) { shippedBlocks, err := readShippedBlocks(s.dir) if err != nil { diff --git a/pkg/mimir/mimir_test.go b/pkg/mimir/mimir_test.go index aac804b2bfc..a024c5ab2d7 100644 --- a/pkg/mimir/mimir_test.go +++ b/pkg/mimir/mimir_test.go @@ -417,7 +417,7 @@ func TestConfigValidation(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - err := tc.getTestConfig().Validate(nil) + err := tc.getTestConfig().Validate(util_log.Logger) if tc.expectAnyError { require.Error(t, err) } else if tc.expectedError != nil { diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index d6aff66f50a..2db835175ff 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -233,7 +233,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, headStripeSizeHelp) f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).") - f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled.") + f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. If set to a positive value it overrides the deprecated "+maxTSDBOpeningConcurrencyOnStartupFlag+" option") f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.") f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 13*time.Hour, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 
0 or negative value disables closing of idle TSDB.") f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.") @@ -252,9 +252,8 @@ func (cfg *TSDBConfig) Validate(logger log.Logger) error { if cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup <= 0 { return errInvalidOpeningConcurrency - } else { - util.WarnDeprecatedConfig(maxTSDBOpeningConcurrencyOnStartupFlag, logger) } + util.WarnDeprecatedConfig(maxTSDBOpeningConcurrencyOnStartupFlag, logger) if cfg.HeadCompactionInterval <= 0 || cfg.HeadCompactionInterval > 15*time.Minute { return errInvalidCompactionInterval diff --git a/pkg/util/test/histogram.go b/pkg/util/test/histogram.go index f9abeb409e0..52106d4f790 100644 --- a/pkg/util/test/histogram.go +++ b/pkg/util/test/histogram.go @@ -5,32 +5,32 @@ package test import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/stretchr/testify/require" ) func GenerateTestHistograms(i int) []*histogram.Histogram { - return tsdb.GenerateTestHistograms(i) + return tsdbutil.GenerateTestHistograms(i) } func GenerateTestFloatHistograms(i int) []*histogram.FloatHistogram { - return tsdb.GenerateTestFloatHistograms(i) + return tsdbutil.GenerateTestFloatHistograms(i) } func GenerateTestHistogram(i int) *histogram.Histogram { - return tsdb.GenerateTestHistogram(i) + return tsdbutil.GenerateTestHistogram(i) } func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { - return tsdb.GenerateTestFloatHistogram(i) + return tsdbutil.GenerateTestFloatHistogram(i) } func GenerateTestGaugeHistogram(i int) *histogram.Histogram { - return tsdb.GenerateTestGaugeHistogram(i) + return tsdbutil.GenerateTestGaugeHistogram(i) } func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { - return tsdb.GenerateTestGaugeFloatHistogram(i) + return tsdbutil.GenerateTestGaugeFloatHistogram(i) } // explicit decoded version of GenerateTestHistogram and GenerateTestFloatHistogram From 6cc8193d279dbc4a4a382e79261b8e3e4bb9f6f0 Mon Sep 17 00:00:00 2001 From: Yuri Nikolic Date: Fri, 10 Mar 2023 01:32:24 +0100 Subject: [PATCH 3/6] Trying to clean white noise --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1fe399be99..99509097ab5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ * [ENHANCEMENT] Querier and store-gateway: optimized `.*` and `.+` regular expression label matchers. #4432 * [ENHANCEMENT] Query-frontend: results cache TTL is now configurable by using `-query-frontend.results-cache-ttl` and `-query-frontend.results-cache-ttl-for-out-of-order-time-window` options. These values can also be specified per tenant. Default values are unchanged (7 days and 10 minutes respectively). #4385 * [ENHANCEMENT] Ingester: added advanced configuration parameter `-blocks-storage.tsdb.wal-replay-concurrency` representing the maximum number of CPUs used during WAL replay. #4445 -* [BUGFIX] Querier: Streaming remote read will now continue to return multiple chunks per frame after the first frame. #4423 +* [BUGFIX] Querier: Streaming remote read will now continue to return multiple chunks per frame after the first frame. 
#4423

### Mixin

From 789fbf02c0d3ea2d5642c3b3413575d9b9e2f9a9 Mon Sep 17 00:00:00 2001
From: Yuri Nikolic
Date: Fri, 10 Mar 2023 14:16:33 +0100
Subject: [PATCH 4/6] Fixing review findings

---
 cmd/mimir/config-descriptor.json              |   2 +-
 cmd/mimir/help-all.txt.tmpl                   |   2 +-
 .../configure/about-versioning.md             |   5 +
 .../configuration-parameters/index.md         |   7 +-
 pkg/ingester/ingester.go                      |  78 +++++------
 pkg/ingester/ingester_test.go                 | 123 +++++++-----------
 pkg/mimir/mimir_test.go                       |   2 +-
 pkg/storage/tsdb/config.go                    |  13 +-
 8 files changed, 103 insertions(+), 129 deletions(-)

diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json
index f225319ba10..12647aa373c 100644
--- a/cmd/mimir/config-descriptor.json
+++ b/cmd/mimir/config-descriptor.json
@@ -7093,7 +7093,7 @@
        "kind": "field",
        "name": "wal_replay_concurrency",
        "required": false,
-       "desc": "Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. If set to a positive value it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option",
+       "desc": "Maximum number of CPUs that can simultaneously process WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value, it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option",
        "fieldValue": null,
        "fieldDefaultValue": 0,
        "fieldFlag": "blocks-storage.tsdb.wal-replay-concurrency",
diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl
index 09d7d43c6c3..6468ef3ef94 100644
--- a/cmd/mimir/help-all.txt.tmpl
+++ b/cmd/mimir/help-all.txt.tmpl
@@ -680,7 +680,7 @@ Usage of ./cmd/mimir/mimir:
   -blocks-storage.tsdb.wal-compression-enabled
     	True to enable TSDB WAL compression.
   -blocks-storage.tsdb.wal-replay-concurrency int
-    	Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. If set to a positive value it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option
+    	Maximum number of CPUs that can simultaneously process WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value, it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option
   -blocks-storage.tsdb.wal-segment-size-bytes int
     	TSDB WAL segments files max size (bytes).
(default 134217728)
  -common.storage.azure.account-key string
diff --git a/docs/sources/mimir/operators-guide/configure/about-versioning.md b/docs/sources/mimir/operators-guide/configure/about-versioning.md
index fdfcd9b67a5..4f76c4002b2 100644
--- a/docs/sources/mimir/operators-guide/configure/about-versioning.md
+++ b/docs/sources/mimir/operators-guide/configure/about-versioning.md
@@ -126,3 +126,8 @@ The following features are currently deprecated and will be **removed in Mimir 2
   - `-blocks-storage.bucket-store.consistency-delay`
 - Ingester
   - `-ingester.ring.readiness-check-ring-health`
+
+The following features are currently deprecated and will be **removed in Mimir 2.10**:
+
+- Ingester
+  - `-blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup`
diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md
index 46ef2ca2ec9..a861c06b59b 100644
--- a/docs/sources/mimir/references/configuration-parameters/index.md
+++ b/docs/sources/mimir/references/configuration-parameters/index.md
@@ -3268,9 +3268,10 @@ tsdb:
 [wal_segment_size_bytes: | default = 134217728]

 # (advanced) Maximum number of CPUs that can simultaneously processes WAL
-# replay. 0 means disabled. If set to a positive value it overrides the
-# deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup
-# option
+# replay. If it is set to 0, then each TSDB is replayed with a concurrency
+# equal to the number of CPU cores available on the machine. If set to a
+# positive value, it overrides the deprecated
+# blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option
 # CLI flag: -blocks-storage.tsdb.wal-replay-concurrency
 [wal_replay_concurrency: | default = 0]

diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 190d36379a5..051ef67bba7 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -110,11 +110,8 @@ const (
 	minOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_min_window"
 	maxOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_max_window"

-	// Maximum number of TSDB users present on the file system which can be opened in a single process
-	// without concurrency. More precisely, if actual number of TSDB users is lower than this number,
-	// each TSDB is opened in a single process, while WAL replay is done in WALReplayConcurrency concurrent
-	// processes. Otherwise, TSDBs are opened in WALReplayConcurrency concurrent processes, while WAL replay
-	// is done in a single process.
+	// Value used to track the limit between sequential and concurrent TSDB openings.
+	// At or below this number of tenants, TSDBs of different tenants are opened sequentially, otherwise concurrently.
 	maxTSDBOpenWithoutConcurrency = 10
 )

@@ -1685,7 +1682,7 @@ func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error)
 	}

 	// Create the database and a shipper for a user
-	db, err := i.createTSDB(userID)
+	db, err := i.createTSDB(userID, 0)
 	if err != nil {
 		return nil, err
 	}
@@ -1697,10 +1694,8 @@ func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error)
 	return db, nil
 }

-type tsdbOption func(*tsdb.Options)
-
 // createTSDB creates a TSDB for a given userID, and returns the created db.
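+// A walReplayConcurrency of 0 keeps the TSDB default, under which WAL replay
+// uses a concurrency equal to the number of CPU cores available on the machine.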
-func (i *Ingester) createTSDB(userID string, additionalTsdbOptions ...tsdbOption) (*userTSDB, error) { +func (i *Ingester) createTSDB(userID string, walReplayConcurrency int) (*userTSDB, error) { tsdbPromReg := prometheus.NewRegistry() udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID) userLogger := util_log.WithUserID(userID, i.logger) @@ -1721,7 +1716,8 @@ func (i *Ingester) createTSDB(userID string, additionalTsdbOptions ...tsdbOption maxExemplars := i.limiter.convertGlobalToLocalLimit(userID, i.limits.MaxGlobalExemplarsPerUser(userID)) oooTW := i.limits.OutOfOrderTimeWindow(userID) - tsdbOptions := &tsdb.Options{ + // Create a new user database + db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), MinBlockDuration: blockRanges[0], MaxBlockDuration: blockRanges[len(blockRanges)-1], @@ -1731,6 +1727,7 @@ func (i *Ingester) createTSDB(userID string, additionalTsdbOptions ...tsdbOption HeadChunksEndTimeVariance: i.cfg.BlocksStorageConfig.TSDB.HeadChunksEndTimeVariance, WALCompression: i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled, WALSegmentSize: i.cfg.BlocksStorageConfig.TSDB.WALSegmentSizeBytes, + WALReplayConcurrency: walReplayConcurrency, SeriesLifecycleCallback: userDB, BlocksToDelete: userDB.blocksToDelete, EnableExemplarStorage: true, // enable for everyone so we can raise the limit later @@ -1746,12 +1743,7 @@ func (i *Ingester) createTSDB(userID string, additionalTsdbOptions ...tsdbOption HeadPostingsForMatchersCacheSize: i.cfg.BlocksStorageConfig.TSDB.HeadPostingsForMatchersCacheSize, HeadPostingsForMatchersCacheForce: i.cfg.BlocksStorageConfig.TSDB.HeadPostingsForMatchersCacheForce, EnableNativeHistograms: i.limits.NativeHistogramsIngestionEnabled(userID), - } - for _, tsdbOption := range additionalTsdbOptions { - tsdbOption(tsdbOptions) - } - // Create a new user database - db, err := tsdb.Open(udir, userLogger, tsdbPromReg, tsdbOptions, nil) + }, nil) if err != nil { return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) } @@ -1846,7 +1838,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { queue := make(chan string) group, groupCtx := errgroup.WithContext(ctx) - userIDs, err := i.getAllTSDBUserIDs() + userIDs, err := i.findUserIDsWithTSDBOnFilesystem() if err != nil { level.Error(i.logger).Log("msg", "error while finding existing TSDBs", "err", err) return err @@ -1856,32 +1848,15 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { return nil } - var concurrentOpenTSDBCount = i.cfg.BlocksStorageConfig.TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup - var walReplayConcurrency = 0 - // If TSDBConfig.WALReplayConcurrency is set to a positive value, we honor it and ignore value of - // TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, being the latter deprecated. - // If TSDBConfig.WALReplayConcurrency is 0, it is ignored, and TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup - // determines the number of concurrent processes opening TSDBs. 
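+	// Decide how many TSDBs may be opened at once and how many goroutines each WAL replay may use.
+	// For example (illustrative only): with -blocks-storage.tsdb.wal-replay-concurrency=8 and
+	// 3 tenants on disk, TSDBs are opened one at a time, each replaying its WAL with 8 goroutines;
+	// with 50 tenants, 8 TSDBs are opened at once, each replaying its WAL with a single goroutine.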
-	if i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency > 0 {
-		if len(userIDs) <= maxTSDBOpenWithoutConcurrency {
-			concurrentOpenTSDBCount = 1
-			walReplayConcurrency = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
-		} else {
-			concurrentOpenTSDBCount = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
-			walReplayConcurrency = 1
-		}
-	}
-	walReplayConcurrencyOption := func(tsdbOptions *tsdb.Options) {
-		tsdbOptions.WALReplayConcurrency = walReplayConcurrency
-	}
+	tsdbOpenConcurrency, walReplayConcurrency := i.getConcurrencyConfig(len(userIDs))

 	// Create a pool of workers which will open existing TSDBs.
-	for n := 0; n < concurrentOpenTSDBCount; n++ {
+	for n := 0; n < tsdbOpenConcurrency; n++ {
 		group.Go(func() error {
 			for userID := range queue {
 				startTime := time.Now()

-				db, err := i.createTSDB(userID, walReplayConcurrencyOption)
+				db, err := i.createTSDB(userID, walReplayConcurrency)
 				if err != nil {
 					level.Error(i.logger).Log("msg", "unable to open TSDB", "err", err, "user", userID)
 					return errors.Wrapf(err, "unable to open TSDB for user %s", userID)
@@ -1904,7 +1879,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 	group.Go(func() error {
 		defer close(queue)

-		for userID := range userIDs {
+		for _, userID := range userIDs {
 			// Enqueue the user to be processed.
 			select {
 			case queue <- userID:
@@ -1931,9 +1906,28 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 	return nil
 }

-// getAllTSDBUserIDs finds all users with a TSDB on the filesystem.
-func (i *Ingester) getAllTSDBUserIDs() (map[string]struct{}, error) {
-	userIDs := make(map[string]struct{})
+func (i *Ingester) getConcurrencyConfig(userCount int) (tsdbOpenConcurrency, walReplayConcurrency int) {
+	tsdbOpenConcurrency = i.cfg.BlocksStorageConfig.TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
+	walReplayConcurrency = 0
+	// If TSDBConfig.WALReplayConcurrency is set to a positive value, we honor it and ignore the value of
+	// TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, since the latter is deprecated.
+	// If TSDBConfig.WALReplayConcurrency is 0, it is ignored, and TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
+	// determines the number of concurrent goroutines opening TSDBs.
+	if i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency > 0 {
+		if userCount <= maxTSDBOpenWithoutConcurrency {
+			tsdbOpenConcurrency = 1
+			walReplayConcurrency = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
+		} else {
+			tsdbOpenConcurrency = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
+			walReplayConcurrency = 1
+		}
+	}
+	return
+}
+
+// findUserIDsWithTSDBOnFilesystem finds all users with a TSDB on the filesystem.
+func (i *Ingester) findUserIDsWithTSDBOnFilesystem() ([]string, error) {
+	var userIDs []string
 	walkErr := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			// If the root directory doesn't exist, we're OK (not needed to be created upfront).
@@ -1970,7 +1964,7 @@ func (i *Ingester) getAllTSDBUserIDs() (map[string]struct{}, error) {
 	}

 	// Save userId.
-	userIDs[userID] = struct{}{}
+	userIDs = append(userIDs, userID)

 	// Don't descend into subdirectories.
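+	// (Each tenant keeps its TSDB in a single top-level directory under the TSDB dir, so one level is enough.)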
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index dfabf386908..6bb0f32b636 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -3182,87 +3182,65 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
 		"should load TSDB for any non-empty user directory": {
 			walReplayConcurrency: 10,
 			setup: func(t *testing.T, dir string) {
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
+				for _, userID := range []string{"user0", "user1"} {
+					require.NoError(t, os.MkdirAll(filepath.Join(dir, userID, "dummy"), 0700))
+				}
 				require.NoError(t, os.Mkdir(filepath.Join(dir, "user2"), 0700))
 			},
 			check: func(t *testing.T, i *Ingester) {
 				require.Equal(t, 2, len(i.tsdbs))
-				require.NotNil(t, i.getTSDB("user0"))
-				require.NotNil(t, i.getTSDB("user1"))
+				for _, userID := range []string{"user0", "user1"} {
+					require.NotNil(t, i.getTSDB(userID))
+				}
 				require.Nil(t, i.getTSDB("user2"))
 			},
 		},
 		"should load all TSDBs on walReplayConcurrency < number of TSDBs": {
 			walReplayConcurrency: 2,
 			setup: func(t *testing.T, dir string) {
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700))
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4"} {
+					require.NoError(t, os.MkdirAll(filepath.Join(dir, userID, "dummy"), 0700))
+				}
 			},
 			check: func(t *testing.T, i *Ingester) {
 				require.Equal(t, 5, len(i.tsdbs))
-				require.NotNil(t, i.getTSDB("user0"))
-				require.NotNil(t, i.getTSDB("user1"))
-				require.NotNil(t, i.getTSDB("user2"))
-				require.NotNil(t, i.getTSDB("user3"))
-				require.NotNil(t, i.getTSDB("user4"))
-				walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0"))
-				require.Equal(t, 2, walReplayConcurrency)
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4"} {
+					require.NotNil(t, i.getTSDB(userID))
+					walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB(userID))
+					require.Equal(t, 2, walReplayConcurrency)
+				}
 			},
 		},
 		"should load all TSDBs on walReplayConcurrency > number of TSDBs": {
 			walReplayConcurrency: 10,
 			setup: func(t *testing.T, dir string) {
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700))
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4"} {
+					require.NoError(t, os.MkdirAll(filepath.Join(dir, userID, "dummy"), 0700))
+				}
 			},
 			check: func(t *testing.T, i *Ingester) {
 				require.Equal(t, 5, len(i.tsdbs))
-				require.NotNil(t, i.getTSDB("user0"))
-				require.NotNil(t, i.getTSDB("user1"))
-				require.NotNil(t, i.getTSDB("user2"))
-				require.NotNil(t, i.getTSDB("user3"))
-				require.NotNil(t, i.getTSDB("user4"))
-				walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0"))
-				require.Equal(t, 10, walReplayConcurrency)
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4"} {
+					require.NotNil(t, i.getTSDB(userID))
+					walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB(userID))
+					require.Equal(t, 10, walReplayConcurrency)
+				}
 			},
 		},
 		"should load all TSDBs on number of TSDBs > maxTSDBOpenWithoutConcurrency": {
 			walReplayConcurrency: 2,
 			setup: func(t *testing.T, dir string) {
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user5", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user6", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user7", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user8", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user9", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user10", "dummy"), 0700))
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4", "user5", "user6", "user7", "user8", "user9", "user10"} {
+					require.NoError(t, os.MkdirAll(filepath.Join(dir, userID, "dummy"), 0700))
+				}
 			},
 			check: func(t *testing.T, i *Ingester) {
 				require.Equal(t, 11, len(i.tsdbs))
-				require.NotNil(t, i.getTSDB("user0"))
-				require.NotNil(t, i.getTSDB("user1"))
-				require.NotNil(t, i.getTSDB("user2"))
-				require.NotNil(t, i.getTSDB("user3"))
-				require.NotNil(t, i.getTSDB("user4"))
-				require.NotNil(t, i.getTSDB("user5"))
-				require.NotNil(t, i.getTSDB("user6"))
-				require.NotNil(t, i.getTSDB("user7"))
-				require.NotNil(t, i.getTSDB("user8"))
-				require.NotNil(t, i.getTSDB("user9"))
-				require.NotNil(t, i.getTSDB("user10"))
-				walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0"))
-				require.Equal(t, 1, walReplayConcurrency)
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4", "user5", "user6", "user7", "user8", "user9", "user10"} {
+					require.NotNil(t, i.getTSDB(userID))
+					walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB(userID))
+					require.Equal(t, 1, walReplayConcurrency)
+				}
 			},
 		},
 		"should fail and rollback if an error occur while loading a TSDB on walReplayConcurrency > number of TSDBs": {
@@ -3279,18 +3257,18 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
 			},
 			check: func(t *testing.T, i *Ingester) {
 				require.Equal(t, 0, len(i.tsdbs))
-				require.Nil(t, i.getTSDB("user0"))
-				require.Nil(t, i.getTSDB("user1"))
+				for _, userID := range []string{"user0", "user1"} {
+					require.Nil(t, i.getTSDB(userID))
+				}
 			},
 			expectedErr: "unable to open TSDB for user user0",
 		},
 		"should fail and rollback if an error occur while loading a TSDB on walReplayConcurrency < number of TSDBs": {
 			walReplayConcurrency: 2,
 			setup: func(t *testing.T, dir string) {
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700))
+				for _, userID := range []string{"user0", "user1", "user3", "user4"} {
+					require.NoError(t, os.MkdirAll(filepath.Join(dir, userID, "dummy"), 0700))
+				}

 				// Create a fake TSDB on disk with an empty chunks head segment file (it's invalid unless
 				// it's the last one and opening TSDB should fail).
@@ -3300,12 +3278,9 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
 				require.NoError(t, os.WriteFile(filepath.Join(dir, "user2", "chunks_head", "00000002"), nil, 0700))
 			},
 			check: func(t *testing.T, i *Ingester) {
-				require.Equal(t, 0, len(i.tsdbs))
-				require.Nil(t, i.getTSDB("user0"))
-				require.Nil(t, i.getTSDB("user1"))
-				require.Nil(t, i.getTSDB("user2"))
-				require.Nil(t, i.getTSDB("user3"))
-				require.Nil(t, i.getTSDB("user4"))
+				for _, userID := range []string{"user0", "user1"} {
+					require.Nil(t, i.getTSDB(userID))
+				}
 			},
 			expectedErr: "unable to open TSDB for user user2",
 		},
@@ -3313,21 +3288,17 @@ func TestIngester_OpenExistingTSDBOnStartup(t *testing.T) {
 			walReplayConcurrency:                         0,
 			deprecatedMaxTSDBOpeningConcurrencyOnStartup: 2,
 			setup: func(t *testing.T, dir string) {
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user0", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user1", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user2", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user3", "dummy"), 0700))
-				require.NoError(t, os.MkdirAll(filepath.Join(dir, "user4", "dummy"), 0700))
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4"} {
+					require.NoError(t, os.MkdirAll(filepath.Join(dir, userID, "dummy"), 0700))
+				}
 			},
 			check: func(t *testing.T, i *Ingester) {
 				require.Equal(t, 5, len(i.tsdbs))
-				require.NotNil(t, i.getTSDB("user0"))
-				require.NotNil(t, i.getTSDB("user1"))
-				require.NotNil(t, i.getTSDB("user2"))
-				require.NotNil(t, i.getTSDB("user3"))
-				require.NotNil(t, i.getTSDB("user4"))
-				walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB("user0"))
-				require.NotEqual(t, 0, walReplayConcurrency)
+				for _, userID := range []string{"user0", "user1", "user2", "user3", "user4"} {
+					require.NotNil(t, i.getTSDB(userID))
+					walReplayConcurrency := getWALReplayConcurrencyFromTSDBHeadOptions(i.getTSDB(userID))
+					require.NotEqual(t, 0, walReplayConcurrency)
+				}
 			},
 		},
 	}
diff --git a/pkg/mimir/mimir_test.go b/pkg/mimir/mimir_test.go
index a024c5ab2d7..251469ebeae 100644
--- a/pkg/mimir/mimir_test.go
+++ b/pkg/mimir/mimir_test.go
@@ -417,7 +417,7 @@ func TestConfigValidation(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			err := tc.getTestConfig().Validate(util_log.Logger)
+			err := tc.getTestConfig().Validate(log.NewNopLogger())
 			if tc.expectAnyError {
 				require.Error(t, err)
 			} else if tc.expectedError != nil {
diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go
index 2db835175ff..4f0a7d12a2c 100644
--- a/pkg/storage/tsdb/config.go
+++ b/pkg/storage/tsdb/config.go
@@ -82,8 +82,9 @@ const (
 	headPostingsForMatchersCacheSizeHelp = "Maximum number of entries in the cache for postings for matchers in the Head and OOOHead when ttl > 0."
 	headPostingsForMatchersCacheForce    = "Force the cache to be used for postings for matchers in the Head and OOOHead, even if it's not a concurrent (query-sharding) call."

-	consistencyDelayFlag                   = "blocks-storage.bucket-store.consistency-delay"
-	maxTSDBOpeningConcurrencyOnStartupFlag = "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup"
+	consistencyDelayFlag                      = "blocks-storage.bucket-store.consistency-delay"
+	maxTSDBOpeningConcurrencyOnStartupFlag    = "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup"
+	defaultMaxTSDBOpeningConcurrencyOnStartup = 10
 )

 // Validation errors
@@ -224,7 +225,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 	f.DurationVar(&cfg.ShipInterval, "blocks-storage.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.")
 	f.IntVar(&cfg.ShipConcurrency, "blocks-storage.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.")
 	f.Uint64Var(&cfg.SeriesHashCacheMaxBytes, "blocks-storage.tsdb.series-hash-cache-max-size-bytes", uint64(1*units.Gibibyte), "Max size - in bytes - of the in-memory series hash cache. The cache is shared across all tenants and it's used only when query sharding is enabled.")
-	f.IntVar(&cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, maxTSDBOpeningConcurrencyOnStartupFlag, 10, "limit the number of concurrently opening TSDB's on startup")
+	f.IntVar(&cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, maxTSDBOpeningConcurrencyOnStartupFlag, defaultMaxTSDBOpeningConcurrencyOnStartup, "limit the number of concurrently opening TSDB's on startup")
 	f.DurationVar(&cfg.HeadCompactionInterval, "blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently the ingester checks whether the TSDB head should be compacted and, if so, triggers the compaction. Mimir applies a jitter to the first check, while subsequent checks will happen at the configured interval. Block is only created if data covers smallest block range. The configured interval must be between 0 and 15 minutes.")
 	f.IntVar(&cfg.HeadCompactionConcurrency, "blocks-storage.tsdb.head-compaction-concurrency", 1, "Maximum number of tenants concurrently compacting TSDB head into a new block")
 	f.DurationVar(&cfg.HeadCompactionIdleTimeout, "blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. Note that up to 25% jitter is added to the value to avoid ingesters compacting concurrently. 0 means disabled.")
@@ -233,7 +234,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, headStripeSizeHelp)
 	f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.")
 	f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).")
-	f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously processes WAL replay. 0 means disabled. If set to a positive value it overrides the deprecated "+maxTSDBOpeningConcurrencyOnStartupFlag+" option")
+	f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously processes WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated "+maxTSDBOpeningConcurrencyOnStartupFlag+" option")
 	f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.")
 	f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 13*time.Hour, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.")
 	f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.")
@@ -253,7 +254,9 @@ func (cfg *TSDBConfig) Validate(logger log.Logger) error {
 	if cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup <= 0 {
 		return errInvalidOpeningConcurrency
 	}
-	util.WarnDeprecatedConfig(maxTSDBOpeningConcurrencyOnStartupFlag, logger)
+	if cfg.DeprecatedMaxTSDBOpeningConcurrencyOnStartup != defaultMaxTSDBOpeningConcurrencyOnStartup {
+		util.WarnDeprecatedConfig(maxTSDBOpeningConcurrencyOnStartupFlag, logger)
+	}

 	if cfg.HeadCompactionInterval <= 0 || cfg.HeadCompactionInterval > 15*time.Minute {
 		return errInvalidCompactionInterval
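The Validate change above warns about the deprecated option only when its value differs from the compiled-in default, so operators who never touched the flag don't see a spurious deprecation warning. A minimal sketch of that pattern, with a plain logger standing in for Mimir's util.WarnDeprecatedConfig:

    package main

    import (
        "errors"
        "log"
    )

    // Mirrors defaultMaxTSDBOpeningConcurrencyOnStartup from the patch.
    const defaultOpeningConcurrency = 10

    var errInvalidOpeningConcurrency = errors.New("TSDB opening concurrency must be positive")

    func validateOpeningConcurrency(value int) error {
        if value <= 0 {
            return errInvalidOpeningConcurrency
        }
        // Warn only when the operator explicitly changed the deprecated flag.
        if value != defaultOpeningConcurrency {
            log.Println("deprecated flag in use: -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup")
        }
        return nil
    }

    func main() {
        _ = validateOpeningConcurrency(10) // default value: stays silent
        _ = validateOpeningConcurrency(5)  // explicitly set: logs a deprecation warning
    }

The trade-off is that explicitly setting the deprecated flag to its default value also stays silent; the patch accepts that in exchange for quiet default configurations.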
From 3b82cfea15248ff1a0df32980b94b58cc94e5952 Mon Sep 17 00:00:00 2001
From: Yuri Nikolic
Date: Fri, 10 Mar 2023 15:23:29 +0100
Subject: [PATCH 5/6] Fixing review findings

---
 cmd/mimir/config-descriptor.json                       |  2 +-
 cmd/mimir/help-all.txt.tmpl                            |  2 +-
 .../configuration-parameters/index.md                  |  2 +-
 pkg/ingester/ingester.go                               | 27 ++++++------
 pkg/ingester/ingester_test.go                          | 43 +++++++++++++++++++
 pkg/storage/tsdb/config.go                             |  2 +-
 6 files changed, 61 insertions(+), 17 deletions(-)

diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json
index 12647aa373c..a41fe89c14f 100644
--- a/cmd/mimir/config-descriptor.json
+++ b/cmd/mimir/config-descriptor.json
@@ -7093,7 +7093,7 @@
         "kind": "field",
         "name": "wal_replay_concurrency",
         "required": false,
-        "desc": "Maximum number of CPUs that can simultaneously processes WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option",
+        "desc": "Maximum number of CPUs that can simultaneously process WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option.",
         "fieldValue": null,
         "fieldDefaultValue": 0,
         "fieldFlag": "blocks-storage.tsdb.wal-replay-concurrency",
diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl
index 6468ef3ef94..b0e10d7fd81 100644
--- a/cmd/mimir/help-all.txt.tmpl
+++ b/cmd/mimir/help-all.txt.tmpl
@@ -680,7 +680,7 @@ Usage of ./cmd/mimir/mimir:
   -blocks-storage.tsdb.wal-compression-enabled
     	True to enable TSDB WAL compression.
   -blocks-storage.tsdb.wal-replay-concurrency int
-    	Maximum number of CPUs that can simultaneously processes WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option
+    	Maximum number of CPUs that can simultaneously process WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option.
   -blocks-storage.tsdb.wal-segment-size-bytes int
     	TSDB WAL segments files max size (bytes). (default 134217728)
   -common.storage.azure.account-key string
diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md
index a861c06b59b..1ff1415ad02 100644
--- a/docs/sources/mimir/references/configuration-parameters/index.md
+++ b/docs/sources/mimir/references/configuration-parameters/index.md
@@ -3271,7 +3271,7 @@ tsdb:
   # replay. If it is set to 0, then each TSDB is replayed with a concurrency
   # equal to the number of CPU cores available on the machine. If set to a
   # positive value it overrides the deprecated
-  # blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option
+  # -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup option.
   # CLI flag: -blocks-storage.tsdb.wal-replay-concurrency
   [wal_replay_concurrency: <int> | default = 0]

diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 051ef67bba7..705ddfba112 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -1848,7 +1848,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 		return nil
 	}

-	tsdbOpenConcurrency, walReplayConcurrency := i.getConcurrencyConfig(len(userIDs))
+	tsdbOpenConcurrency, tsdbWALReplayConcurrency := getOpenTSDBsConcurrencyConfig(i.cfg.BlocksStorageConfig.TSDB, len(userIDs))

 	// Create a pool of workers which will open existing TSDBs.
 	for n := 0; n < tsdbOpenConcurrency; n++ {
@@ -1856,7 +1856,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 			for userID := range queue {
 				startTime := time.Now()

-				db, err := i.createTSDB(userID, walReplayConcurrency)
+				db, err := i.createTSDB(userID, tsdbWALReplayConcurrency)
 				if err != nil {
 					level.Error(i.logger).Log("msg", "unable to open TSDB", "err", err, "user", userID)
 					return errors.Wrapf(err, "unable to open TSDB for user %s", userID)
@@ -1906,20 +1906,21 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 	return nil
 }

-func (i *Ingester) getConcurrencyConfig(userCount int) (tsdbOpenConcurrency, walReplayConcurrency int) {
-	tsdbOpenConcurrency = i.cfg.BlocksStorageConfig.TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
-	walReplayConcurrency = 0
-	// If TSDBConfig.WALReplayConcurrency is set to a positive value, we honor it and ignore the value of
-	// TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup, since the latter is deprecated.
-	// If TSDBConfig.WALReplayConcurrency is 0, it is ignored, and TSDB.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
-	// determines the number of concurrent processes opening TSDBs.
-	if i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency > 0 {
+func getOpenTSDBsConcurrencyConfig(tsdbConfig mimir_tsdb.TSDBConfig, userCount int) (tsdbOpenConcurrency, tsdbWALReplayConcurrency int) {
+	tsdbOpenConcurrency = tsdbConfig.DeprecatedMaxTSDBOpeningConcurrencyOnStartup
+	tsdbWALReplayConcurrency = 0
+	// When WALReplayConcurrency is enabled, we want to ensure the WAL replay at ingester startup
+	// doesn't use more than the configured number of CPU cores. In order to optimize performance
+	// both on single tenant and multi tenant Mimir clusters, we use a heuristic to decide whether
+	// it's better to parallelize the WAL replay of each single TSDB (low number of tenants) or
+	// the WAL replay of multiple TSDBs at the same time (high number of tenants).
+	if tsdbConfig.WALReplayConcurrency > 0 {
 		if userCount <= maxTSDBOpenWithoutConcurrency {
 			tsdbOpenConcurrency = 1
-			walReplayConcurrency = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
+			tsdbWALReplayConcurrency = tsdbConfig.WALReplayConcurrency
 		} else {
-			tsdbOpenConcurrency = i.cfg.BlocksStorageConfig.TSDB.WALReplayConcurrency
-			walReplayConcurrency = 1
+			tsdbOpenConcurrency = tsdbConfig.WALReplayConcurrency
+			tsdbWALReplayConcurrency = 1
 		}
 	}
 	return
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index 6bb0f32b636..e4f8696c3c4 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -6663,3 +6663,46 @@

 	testResult(expectedMatrix, "Result should contain the histogram even when not accepting histograms")
 }
+
+func TestIngester_GetOpenTSDBsConcurrencyConfig(t *testing.T) {
+	tests := map[string]struct {
+		walReplayConcurrency               int
+		maxTSDBOpeningConcurrencyOnStartup int
+		tenantCount                        int
+		expectedTSDBOpenConcurrency        int
+		expectedTSDBWALReplayConcurrency   int
+	}{
+		"if -blocks-storage.tsdb.wal-replay-concurrency is 0, -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup is used": {
+			walReplayConcurrency:               0,
+			maxTSDBOpeningConcurrencyOnStartup: 10,
+			tenantCount:                        5,
+			expectedTSDBOpenConcurrency:        10,
+			expectedTSDBWALReplayConcurrency:   0,
+		},
+		"if -blocks-storage.tsdb.wal-replay-concurrency > 0 and there are <= 10 tenants, ignore -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup and parallelize WAL replay on sequential openings": {
+			walReplayConcurrency:               3,
+			maxTSDBOpeningConcurrencyOnStartup: 10,
+			tenantCount:                        5,
+			expectedTSDBOpenConcurrency:        1,
+			expectedTSDBWALReplayConcurrency:   3,
+		},
+		"if -blocks-storage.tsdb.wal-replay-concurrency > 0 and there are > 10 tenants, ignore -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup and parallelize openings with single WAL replay": {
+			walReplayConcurrency:               3,
+			maxTSDBOpeningConcurrencyOnStartup: 10,
+			tenantCount:                        5,
+			expectedTSDBOpenConcurrency:        1,
+			expectedTSDBWALReplayConcurrency:   3,
+		},
+	}
+	for testName, testData := range tests {
+		t.Run(testName, func(t *testing.T) {
+			tsdbConfig := mimir_tsdb.TSDBConfig{
+				WALReplayConcurrency: testData.walReplayConcurrency,
+				DeprecatedMaxTSDBOpeningConcurrencyOnStartup: testData.maxTSDBOpeningConcurrencyOnStartup,
+			}
+			tsdbOpenConcurrency, tsdbWALReplayConcurrency := getOpenTSDBsConcurrencyConfig(tsdbConfig, testData.tenantCount)
+			require.Equal(t, testData.expectedTSDBOpenConcurrency, tsdbOpenConcurrency)
+			require.Equal(t, testData.expectedTSDBWALReplayConcurrency, tsdbWALReplayConcurrency)
+		})
+	}
+}
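For reference, the heuristic exercised by TestIngester_GetOpenTSDBsConcurrencyConfig can be distilled into a standalone function. The sketch below assumes maxTSDBOpenWithoutConcurrency is 10, as the "<= 10 tenants" and "> 10 tenants" test names imply; the function and parameter names are simplified:

    package main

    import "fmt"

    // Assumed threshold, as the test names imply.
    const maxTSDBOpenWithoutConcurrency = 10

    // concurrency mirrors getOpenTSDBsConcurrencyConfig: with few tenants, open
    // TSDBs one at a time and give each WAL replay all the configured CPUs; with
    // many tenants, open that many TSDBs at once and replay each WAL on one CPU.
    func concurrency(walReplayConcurrency, deprecatedOpenConcurrency, tenants int) (open, replay int) {
        if walReplayConcurrency <= 0 {
            return deprecatedOpenConcurrency, 0 // fall back to the deprecated flag
        }
        if tenants <= maxTSDBOpenWithoutConcurrency {
            return 1, walReplayConcurrency
        }
        return walReplayConcurrency, 1
    }

    func main() {
        for _, tenants := range []int{5, 15} {
            open, replay := concurrency(3, 10, tenants)
            fmt.Printf("tenants=%d: open %d TSDBs at once, WAL replay concurrency %d\n", tenants, open, replay)
        }
        // Output:
        // tenants=5: open 1 TSDBs at once, WAL replay concurrency 3
        // tenants=15: open 3 TSDBs at once, WAL replay concurrency 1
    }

In both regimes at most WALReplayConcurrency CPUs are replaying WAL segments at any moment: either a single TSDB replays on all of them, or that many TSDBs replay on one CPU each.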
diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go
index 4f0a7d12a2c..9e1874e8b16 100644
--- a/pkg/storage/tsdb/config.go
+++ b/pkg/storage/tsdb/config.go
@@ -234,7 +234,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, headStripeSizeHelp)
 	f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.")
 	f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).")
-	f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously processes WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated "+maxTSDBOpeningConcurrencyOnStartupFlag+" option")
+	f.IntVar(&cfg.WALReplayConcurrency, "blocks-storage.tsdb.wal-replay-concurrency", 0, "Maximum number of CPUs that can simultaneously process WAL replay. If it is set to 0, then each TSDB is replayed with a concurrency equal to the number of CPU cores available on the machine. If set to a positive value it overrides the deprecated -"+maxTSDBOpeningConcurrencyOnStartupFlag+" option.")
 	f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.")
 	f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 13*time.Hour, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.")
 	f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.")

From a4ef7550249671e4e603d428b185ea5596d762e4 Mon Sep 17 00:00:00 2001
From: Yuri Nikolic
Date: Fri, 10 Mar 2023 15:38:08 +0100
Subject: [PATCH 6/6] Fixing review findings

---
 pkg/ingester/ingester_test.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index e4f8696c3c4..6fda28fc05f 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -6689,9 +6689,9 @@ func TestIngester_GetOpenTSDBsConcurrencyConfig(t *testing.T) {
 		"if -blocks-storage.tsdb.wal-replay-concurrency > 0 and there are > 10 tenants, ignore -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup and parallelize openings with single WAL replay": {
 			walReplayConcurrency:               3,
 			maxTSDBOpeningConcurrencyOnStartup: 10,
-			tenantCount:                        5,
-			expectedTSDBOpenConcurrency:        1,
-			expectedTSDBWALReplayConcurrency:   3,
+			tenantCount:                        15,
+			expectedTSDBOpenConcurrency:        3,
+			expectedTSDBWALReplayConcurrency:   1,
 		},
 	}
 	for testName, testData := range tests {
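The tenant discovery introduced earlier in the series (findUserIDsWithTSDBOnFilesystem) walks the TSDB root directory and treats each immediate subdirectory as a tenant, relying on filepath.SkipDir both to tolerate a missing root and to avoid descending into per-tenant data. A simplified standalone sketch of that walk, with the patch's per-directory validity checks omitted:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func findUserIDs(root string) ([]string, error) {
        var userIDs []string
        walkErr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
            if err != nil {
                if os.IsNotExist(err) {
                    // Missing root: treat as no tenants (one way to honor the
                    // patch's "root directory doesn't exist, we're OK" comment).
                    return filepath.SkipDir
                }
                return err
            }
            if path == root || !info.IsDir() {
                return nil // keep walking past the root itself and stray files
            }
            userIDs = append(userIDs, filepath.Base(path))
            return filepath.SkipDir // don't descend into the tenant's TSDB
        })
        return userIDs, walkErr
    }

    func main() {
        ids, err := findUserIDs("/tmp/tsdb") // hypothetical data directory
        fmt.Println(ids, err)
    }

Returning filepath.SkipDir when the root itself cannot be read makes filepath.Walk report success, which matches the patch's intent that a missing data directory simply means there is nothing to load.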