
refactor(bloom planner): Compute gaps and build tasks from metas and TSDBs #12994

Merged · 11 commits · May 21, 2024
27 changes: 27 additions & 0 deletions docs/sources/shared/configuration.md
@@ -333,6 +333,23 @@ bloom_build:
[enabled: <boolean> | default = false]

planner:
# Interval at which to re-run the bloom creation planning.
# CLI flag: -bloom-build.planner.interval
[planning_interval: <duration> | default = 10m]

# Newest day-table offset (from today, inclusive) to build blooms for.
# Increase to lower cost by not re-writing data to object storage too
# frequently since recent data changes more often at the cost of not having
# blooms available as quickly.
# CLI flag: -bloom-build.planner.min-table-offset
[min_table_offset: <int> | default = 1]

# Oldest day-table offset (from today, inclusive) to compact. This can be
# used to lower cost by not trying to compact older data which doesn't
# change. This can be optimized by aligning it with the maximum
# `reject_old_samples_max_age` setting of any tenant.
# CLI flag: -bloom-build.planner.max-table-offset
[max_table_offset: <int> | default = 2]

builder:

@@ -3382,6 +3399,16 @@ shard_streams:
# CLI flag: -bloom-compactor.max-bloom-size
[bloom_compactor_max_bloom_size: <int> | default = 128MB]

# Experimental. Whether to create blooms for the tenant.
# CLI flag: -bloom-build.enable
[bloom_creation_enabled: <boolean> | default = false]

# Experimental. Number of splits to create for the series keyspace when building
# blooms. The series keyspace is split into this many parts to parallelize bloom
# creation.
# CLI flag: -bloom-build.split-keyspace-by-factor
[bloom_split_series_keyspace_by_factor: <int> | default = 256]

# Experimental. Length of the n-grams created when computing blooms from log
# lines.
# CLI flag: -bloom-compactor.ngram-length
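To make the offset semantics above concrete, here is a minimal Go sketch (illustrative only, not code from this PR; `dayTableRange` is a made-up helper) of how `min_table_offset` and `max_table_offset` could map to the inclusive range of day tables the planner builds blooms for:

```go
package main

import (
	"fmt"
	"time"
)

const day = 24 * time.Hour

// dayTableRange returns the inclusive [oldest, newest] day boundaries implied
// by the offsets, counting back from "today" (offset 0 = today).
func dayTableRange(now time.Time, minOffset, maxOffset int) (oldest, newest time.Time) {
	today := now.UTC().Truncate(day)
	oldest = today.Add(-time.Duration(maxOffset) * day)
	newest = today.Add(-time.Duration(minOffset) * day)
	return oldest, newest
}

func main() {
	// With the defaults min_table_offset=1 and max_table_offset=2, the planner
	// considers yesterday and the day before, but not today's still-changing data.
	oldest, newest := dayTableRange(time.Now(), 1, 2)
	fmt.Printf("building blooms for day tables %s through %s\n",
		oldest.Format("2006-01-02"), newest.Format("2006-01-02"))
}
```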
29 changes: 24 additions & 5 deletions pkg/bloombuild/planner/config.go
@@ -1,21 +1,40 @@
package planner

import "flag"
import (
"flag"
"fmt"
"time"
)

// Config configures the bloom-planner component.
type Config struct {
// TODO: Add config
PlanningInterval time.Duration `yaml:"planning_interval"`
MinTableOffset int `yaml:"min_table_offset"`
MaxTableOffset int `yaml:"max_table_offset"`
}

// RegisterFlagsWithPrefix registers flags for the bloom-planner configuration.
func (cfg *Config) RegisterFlagsWithPrefix(_ string, _ *flag.FlagSet) {
// TODO: Register flags with flagsPrefix
func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.DurationVar(&cfg.PlanningInterval, prefix+".interval", 10*time.Minute, "Interval at which to re-run the bloom creation planning.")
Contributor:
nit: IMO, 10m is too frequent; it's more frequent than the TSDB index is compacted.

Contributor Author:
I changed it to 8 hours, so it runs three times a day. Wdyt?

f.IntVar(&cfg.MinTableOffset, prefix+".min-table-offset", 1, "Newest day-table offset (from today, inclusive) to build blooms for. Increase to lower cost by not re-writing data to object storage too frequently since recent data changes more often at the cost of not having blooms available as quickly.")
// TODO(owen-d): ideally we'd set this per tenant based on their `reject_old_samples_max_age` setting,
// but due to how we need to discover tenants, we can't do that yet. Tenant+Period discovery is done by
// iterating the table periods in object storage and looking for tenants within that period.
// In order to have this done dynamically, we'd need to account for tenant specific overrides, which are also
// dynamically reloaded.
// I'm doing it the simple way for now.
f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 2, "Oldest day-table offset (from today, inclusive) to compact. This can be used to lower cost by not trying to compact older data which doesn't change. This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.")
}

func (cfg *Config) Validate() error {
if cfg.MinTableOffset > cfg.MaxTableOffset {
return fmt.Errorf("min-table-offset (%d) must be less than or equal to max-table-offset (%d)", cfg.MinTableOffset, cfg.MaxTableOffset)
}

return nil
}

type Limits interface {
// TODO: Add limits
BloomCreationEnabled(tenantID string) bool
BloomSplitSeriesKeyspaceByFactor(tenantID string) int
}
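For context on how this configuration could be wired, a hedged usage sketch follows. `RegisterFlagsWithPrefix` and `Validate` match the diff above; the import path, the flag prefix, and the standalone `main` are assumptions for illustration:

```go
package main

import (
	"flag"
	"log"

	"github.com/grafana/loki/v3/pkg/bloombuild/planner" // assumed module path
)

func main() {
	var cfg planner.Config
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	cfg.RegisterFlagsWithPrefix("bloom-build.planner", fs)

	// Override the default 10m interval, e.g. to the 8h suggested in review.
	if err := fs.Parse([]string{"-bloom-build.planner.interval=8h"}); err != nil {
		log.Fatal(err)
	}

	// Validate catches inverted offsets, i.e. min-table-offset > max-table-offset.
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid planner config: %v", err)
	}
	log.Printf("planning every %s, day-table offsets [%d, %d]",
		cfg.PlanningInterval, cfg.MinTableOffset, cfg.MaxTableOffset)
}
```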
36 changes: 36 additions & 0 deletions pkg/bloombuild/planner/metrics.go
@@ -8,10 +8,19 @@ import (
const (
metricsNamespace = "loki"
metricsSubsystem = "bloomplanner"

statusSuccess = "success"
statusFailure = "failure"
)

type Metrics struct {
running prometheus.Gauge

buildStarted prometheus.Counter
buildCompleted *prometheus.CounterVec
buildTime *prometheus.HistogramVec

tenantsDiscovered prometheus.Counter
}

func NewMetrics(r prometheus.Registerer) *Metrics {
@@ -22,5 +31,32 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
Name: "running",
Help: "Value will be 1 if bloom planner is currently running on this instance",
}),

buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "build_started_total",
Help: "Total number of builds started",
}),
buildCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "build_completed_total",
Help: "Total number of builds completed",
}, []string{"status"}),
buildTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "build_time_seconds",
Help: "Time spent during a builds cycle.",
Buckets: prometheus.DefBuckets,
}, []string{"status"}),

tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "tenants_discovered_total",
Help: "Number of tenants discovered during the current build iteration",
}),
}
}
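A short sketch of how the planner's build loop could drive these metrics (assumed usage in the same `planner` package; `plannerLoopSketch` and `planFn` are illustrative stand-ins, while the metric fields and status labels come from the diff above):

```go
package planner

import (
	"context"
	"time"
)

// plannerLoopSketch shows one build cycle updating the metrics declared above:
// count the start, run the planning work, then record status and duration.
type plannerLoopSketch struct {
	metrics *Metrics
	planFn  func(context.Context) error // stand-in for computing gaps and enqueueing tasks
}

func (p *plannerLoopSketch) runOneBuild(ctx context.Context) error {
	start := time.Now()
	p.metrics.buildStarted.Inc()

	status := statusSuccess
	err := p.planFn(ctx)
	if err != nil {
		status = statusFailure
	}
	p.metrics.buildCompleted.WithLabelValues(status).Inc()
	p.metrics.buildTime.WithLabelValues(status).Observe(time.Since(start).Seconds())
	return err
}
```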