diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index e20ae4ed8ab..5d024318de0 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -5714,6 +5714,17 @@ "fieldFlag": "blocks-storage.tsdb.dir", "fieldType": "string" }, + { + "kind": "field", + "name": "block_ranges_period", + "required": false, + "desc": "TSDB blocks range period.", + "fieldValue": null, + "fieldDefaultValue": [], + "fieldFlag": "blocks-storage.tsdb.block-ranges-period", + "fieldType": "list of durations", + "fieldCategory": "advanced" + }, { "kind": "field", "name": "retention_period", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index ed770a62e45..e5f5bbb6181 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -480,7 +480,7 @@ Usage of ./cmd/mimir/mimir: -blocks-storage.swift.username string OpenStack Swift username. -blocks-storage.tsdb.block-ranges-period comma-separated-list-of-durations - [experimental] TSDB blocks range period. (default 2h0m0s) + TSDB blocks range period. (default 2h0m0s) -blocks-storage.tsdb.close-idle-tsdb-timeout duration If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB. 
(default 13h0m0s) -blocks-storage.tsdb.dir string diff --git a/docs/sources/migration-guide/migrating-from-single-zone-with-helm.md b/docs/sources/migration-guide/migrating-from-single-zone-with-helm.md index 0e9715a5e37..aacd2bf6932 100644 --- a/docs/sources/migration-guide/migrating-from-single-zone-with-helm.md +++ b/docs/sources/migration-guide/migrating-from-single-zone-with-helm.md @@ -336,7 +336,7 @@ Set the chosen configuration in your custom values (e.g. `custom.yaml`). There are two ways to do the migration: 1. With downtime. In this [procedure](#migrate-ingesters-with-downtime) ingress is stopped to the cluster while ingesters are migrated. This is the quicker and simpler way. The time it takes to execute this migration depends on how fast ingesters restart and upload their data to object storage, but in general should be finished in an hour. -1. Without downtime. This is a multi step [procedure](#migrate-ingesters-without-downtime) which requires additional hardware resources as the old and new ingesters run in parallel for some time. This is a complex migration that can take days and requires monitoring for increased resouce utilization. The minimum time it takes to do this migration can be calculated as (`querier.query_store_after`) + (2h TSDB blocks range period + `blocks_storage.tsdb.head_compaction_idle_timeout`) \* (1 + number_of_ingesters / 21). With the default values this means 12h + 3h \* (1 + number of ingesters / 21) = 15h + 3h \* (number_of_ingesters / 21). Add an extra 12 hours if shuffle sharding is enabled. +1. Without downtime. This is a multi step [procedure](#migrate-ingesters-without-downtime) which requires additional hardware resources as the old and new ingesters run in parallel for some time. This is a complex migration that can take days and requires monitoring for increased resource utilization. 
The minimum time it takes to do this migration can be calculated as (`querier.query_store_after`) + (`blocks_storage.tsdb.block_ranges_period` + `blocks_storage.tsdb.head_compaction_idle_timeout`) \* (1 + number_of_ingesters / 21). With the default values this means 12h + 3h \* (1 + number of ingesters / 21) = 15h + 3h \* (number_of_ingesters / 21). Add an extra 12 hours if shuffle sharding is enabled. ### Migrate ingesters with downtime @@ -543,7 +543,7 @@ Before starting this procedure, set up your zones according to [Configure zone-a 1. Once the new ingesters are started and are ready, wait at least 3 hours. - The 3 hours is calculated from 2h TSDB block range period + `blocks_storage.tsdb.head_compaction_idle_timeout` Grafana Mimir parameters to give enough time for ingesters to remove stale series from memory. Stale series will be there due to series being moved between ingesters. + The 3 hours is calculated from `blocks_storage.tsdb.block_ranges_period` + `blocks_storage.tsdb.head_compaction_idle_timeout` Grafana Mimir parameters to give enough time for ingesters to remove stale series from memory. Stale series will be there due to series being moved between ingesters. 1. If the current `` above in `ingester.zoneAwareReplication.migration.replicas` is less than `ingester.replicas`, go back and increase `` with at most 21 and repeat these four steps. @@ -689,7 +689,7 @@ Before starting this procedure, set up your zones according to [Configure zone-a 1. Wait at least 3 hours. - The 3 hours is calculated from 2h TSDB block range period + `blocks_storage.tsdb.head_compaction_idle_timeout` Grafana Mimir parameters to give enough time for ingesters to remove stale series from memory. Stale series will be there due to series being moved between ingesters. 
+ The 3 hours is calculated from `blocks_storage.tsdb.block_ranges_period` + `blocks_storage.tsdb.head_compaction_idle_timeout` Grafana Mimir parameters to give enough time for ingesters to remove stale series from memory. Stale series will be there due to series being moved between ingesters. 1. If you are using [shuffle sharding]({{< relref "../operators-guide/configure/configure-shuffle-sharding" >}}): diff --git a/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md index 32fb5d1c1e8..1f250781fd9 100644 --- a/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md @@ -3024,6 +3024,10 @@ tsdb: # CLI flag: -blocks-storage.tsdb.dir [dir: | default = "./tsdb/"] + # (advanced) TSDB blocks range period. + # CLI flag: -blocks-storage.tsdb.block-ranges-period + [block_ranges_period: | default = 2h0m0s] + # TSDB blocks retention in the ingester before a block is removed, relative to # the newest block written for the tenant. This should be larger than the # -blocks-storage.tsdb.block-ranges-period, -querier.query-store-after and diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 9012eebda11..fbff3c4bb66 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -149,7 +149,7 @@ func (cfg *BlocksStorageConfig) Validate() error { //nolint:golint type TSDBConfig struct { Dir string `yaml:"dir"` - BlockRanges DurationList `yaml:"block_ranges_period" category:"experimental" doc:"hidden"` + BlockRanges DurationList `yaml:"block_ranges_period" category:"advanced"` Retention time.Duration `yaml:"retention_period"` ShipInterval time.Duration `yaml:"ship_interval" category:"advanced"` ShipConcurrency int `yaml:"ship_concurrency" category:"advanced"`