diff --git a/CHANGELOG.md b/CHANGELOG.md
index a1b6b5e4480..0ea4762c417 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@ distributors. Also, during this period, the ingesters will use considerably more resources and as such should be scaled up (or incoming traffic should be heavily throttled). Once all distributors and ingesters have rolled performance will return to normal. Internally we have observed ~1.5x CPU load on the ingesters during the rollout. [#1227](https://github.com/grafana/tempo/pull/1227) (@joe-elliott)
+* [FEATURE] Added metrics-generator: an optional component to generate metrics from ingested traces [#1282](https://github.com/grafana/tempo/pull/1282) (@mapno, @kvrhdn)
 * [ENHANCEMENT] Enterprise jsonnet: add config to create tokengen job explicitly [#1256](https://github.com/grafana/tempo/pull/1256) (@kvrhdn)
 * [ENHANCEMENT] Add new scaling alerts to the tempo-mixin [#1292](https://github.com/grafana/tempo/pull/1292) (@mapno)
 * [BUGFIX]: Remove unnecessary PersistentVolumeClaim [#1245](https://github.com/grafana/tempo/issues/1245)
diff --git a/cmd/tempo/app/app.go b/cmd/tempo/app/app.go
index 8bd9be8bb8b..9747314d7f4 100644
--- a/cmd/tempo/app/app.go
+++ b/cmd/tempo/app/app.go
@@ -33,6 +33,8 @@ import (
 	"github.com/grafana/tempo/modules/distributor/receiver"
 	"github.com/grafana/tempo/modules/frontend"
 	frontend_v1 "github.com/grafana/tempo/modules/frontend/v1"
+	"github.com/grafana/tempo/modules/generator"
+	generator_client "github.com/grafana/tempo/modules/generator/client"
 	"github.com/grafana/tempo/modules/ingester"
 	ingester_client "github.com/grafana/tempo/modules/ingester/client"
 	"github.com/grafana/tempo/modules/overrides"
@@ -48,23 +50,26 @@ const apiDocs = "https://grafana.com/docs/tempo/latest/api_docs/"
 
 // Config is the root config for App.
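+// (Editorial note, not part of the upstream patch: MetricsGeneratorEnabled below is the
+// global opt-in switch for the new component; it surfaces in YAML as the top-level
+// `metrics_generator_enabled: true` used by the example config later in this diff, and it
+// gates span forwarding in the distributor.)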
type Config struct { - Target string `yaml:"target,omitempty"` - AuthEnabled bool `yaml:"auth_enabled,omitempty"` - MultitenancyEnabled bool `yaml:"multitenancy_enabled,omitempty"` - SearchEnabled bool `yaml:"search_enabled,omitempty"` - HTTPAPIPrefix string `yaml:"http_api_prefix"` - UseOTelTracer bool `yaml:"use_otel_tracer,omitempty"` - - Server server.Config `yaml:"server,omitempty"` - Distributor distributor.Config `yaml:"distributor,omitempty"` - IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"` - Querier querier.Config `yaml:"querier,omitempty"` - Frontend frontend.Config `yaml:"query_frontend,omitempty"` - Compactor compactor.Config `yaml:"compactor,omitempty"` - Ingester ingester.Config `yaml:"ingester,omitempty"` - StorageConfig storage.Config `yaml:"storage,omitempty"` - LimitsConfig overrides.Limits `yaml:"overrides,omitempty"` - MemberlistKV memberlist.KVConfig `yaml:"memberlist,omitempty"` + Target string `yaml:"target,omitempty"` + AuthEnabled bool `yaml:"auth_enabled,omitempty"` + MultitenancyEnabled bool `yaml:"multitenancy_enabled,omitempty"` + SearchEnabled bool `yaml:"search_enabled,omitempty"` + MetricsGeneratorEnabled bool `yaml:"metrics_generator_enabled"` + HTTPAPIPrefix string `yaml:"http_api_prefix"` + UseOTelTracer bool `yaml:"use_otel_tracer,omitempty"` + + Server server.Config `yaml:"server,omitempty"` + Distributor distributor.Config `yaml:"distributor,omitempty"` + IngesterClient ingester_client.Config `yaml:"ingester_client,omitempty"` + GeneratorClient generator_client.Config `yaml:"metrics_generator_client,omitempty"` + Querier querier.Config `yaml:"querier,omitempty"` + Frontend frontend.Config `yaml:"query_frontend,omitempty"` + Compactor compactor.Config `yaml:"compactor,omitempty"` + Ingester ingester.Config `yaml:"ingester,omitempty"` + Generator generator.Config `yaml:"metrics_generator,omitempty"` + StorageConfig storage.Config `yaml:"storage,omitempty"` + LimitsConfig overrides.Limits `yaml:"overrides,omitempty"` + MemberlistKV memberlist.KVConfig `yaml:"memberlist,omitempty"` } // RegisterFlagsAndApplyDefaults registers flag. @@ -108,10 +113,13 @@ func (c *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { // Everything else flagext.DefaultValues(&c.IngesterClient) c.IngesterClient.GRPCClientConfig.GRPCCompression = "snappy" + flagext.DefaultValues(&c.GeneratorClient) + c.GeneratorClient.GRPCClientConfig.GRPCCompression = "snappy" flagext.DefaultValues(&c.LimitsConfig) c.Distributor.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "distributor"), f) c.Ingester.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "ingester"), f) + c.Generator.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "generator"), f) c.Querier.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "querier"), f) c.Frontend.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "frontend"), f) c.Compactor.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "compactor"), f) @@ -126,14 +134,19 @@ func (c *Config) MultitenancyIsEnabled() bool { // CheckConfig checks if config values are suspect. 
func (c *Config) CheckConfig() { + if c.Target == MetricsGenerator && !c.MetricsGeneratorEnabled { + level.Warn(log.Logger).Log("msg", "target == metrics-generator but metrics_generator_enabled != true", + "explain", "The metrics-generator will only receive data if metrics_generator_enabled is set to true globally") + } + if c.Ingester.CompleteBlockTimeout < c.StorageConfig.Trace.BlocklistPoll { level.Warn(log.Logger).Log("msg", "ingester.complete_block_timeout < storage.trace.blocklist_poll", - "explan", "You may receive 404s between the time the ingesters have flushed a trace and the querier is aware of the new block") + "explain", "You may receive 404s between the time the ingesters have flushed a trace and the querier is aware of the new block") } if c.Compactor.Compactor.BlockRetention < c.StorageConfig.Trace.BlocklistPoll { level.Warn(log.Logger).Log("msg", "compactor.compaction.compacted_block_timeout < storage.trace.blocklist_poll", - "explan", "Queriers and Compactors may attempt to read a block that no longer exists") + "explain", "Queriers and Compactors may attempt to read a block that no longer exists") } if c.Compactor.Compactor.RetentionConcurrency == 0 { @@ -142,7 +155,7 @@ func (c *Config) CheckConfig() { if c.StorageConfig.Trace.Backend == "s3" && c.Compactor.Compactor.FlushSizeBytes < 5242880 { level.Warn(log.Logger).Log("msg", "c.Compactor.Compactor.FlushSizeBytes < 5242880", - "explan", "Compaction flush size should be 5MB or higher for S3 backend") + "explain", "Compaction flush size should be 5MB or higher for S3 backend") } if c.StorageConfig.Trace.BlocklistPollConcurrency == 0 { @@ -161,16 +174,18 @@ func newDefaultConfig() *Config { type App struct { cfg Config - Server *server.Server - ring *ring.Ring - overrides *overrides.Overrides - distributor *distributor.Distributor - querier *querier.Querier - frontend *frontend_v1.Frontend - compactor *compactor.Compactor - ingester *ingester.Ingester - store storage.Store - MemberlistKV *memberlist.KVInitService + Server *server.Server + ring *ring.Ring + generatorRing *ring.Ring + overrides *overrides.Overrides + distributor *distributor.Distributor + querier *querier.Querier + frontend *frontend_v1.Frontend + compactor *compactor.Compactor + ingester *ingester.Ingester + generator *generator.Generator + store storage.Store + MemberlistKV *memberlist.KVInitService HTTPAuthMiddleware middleware.Interface TracesConsumerMiddleware receiver.Middleware @@ -386,6 +401,15 @@ func (t *App) readyHandler(sm *services.Manager) http.HandlerFunc { } } + // Generator has a special check that makes sure that it was able to register into the ring, + // and that all other ring entries are OK too. 
+ if t.generator != nil { + if err := t.generator.CheckReady(r.Context()); err != nil { + http.Error(w, "Generator not ready: "+err.Error(), http.StatusServiceUnavailable) + return + } + } + // Query Frontend has a special check that makes sure that a querier is attached before it signals // itself as ready if t.frontend != nil { diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 77c13fa1929..55f26d4b0f4 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -11,7 +11,6 @@ import ( "github.com/grafana/dskit/modules" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" - frontend_v1pb "github.com/grafana/tempo/modules/frontend/v1/frontendv1pb" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/weaveworks/common/middleware" @@ -20,6 +19,8 @@ import ( "github.com/grafana/tempo/modules/compactor" "github.com/grafana/tempo/modules/distributor" "github.com/grafana/tempo/modules/frontend" + frontend_v1pb "github.com/grafana/tempo/modules/frontend/v1/frontendv1pb" + "github.com/grafana/tempo/modules/generator" "github.com/grafana/tempo/modules/ingester" "github.com/grafana/tempo/modules/overrides" "github.com/grafana/tempo/modules/querier" @@ -33,10 +34,12 @@ import ( // The various modules that make up tempo. const ( Ring string = "ring" + MetricsGeneratorRing string = "metrics-generator-ring" Overrides string = "overrides" Server string = "server" Distributor string = "distributor" Ingester string = "ingester" + MetricsGenerator string = "metrics-generator" Querier string = "querier" QueryFrontend string = "query-frontend" Compactor string = "compactor" @@ -86,6 +89,18 @@ func (t *App) initRing() (services.Service, error) { return t.ring, nil } +func (t *App) initGeneratorRing() (services.Service, error) { + generatorRing, err := tempo_ring.New(t.cfg.Generator.Ring.ToRingConfig(), "metrics-generator", generator.RingKey, prometheus.DefaultRegisterer) + if err != nil { + return nil, fmt.Errorf("failed to create metrics-generator ring %w", err) + } + t.generatorRing = generatorRing + + t.Server.HTTP.Handle("/metrics-generator/ring", t.generatorRing) + + return t.generatorRing, nil +} + func (t *App) initOverrides() (services.Service, error) { overrides, err := overrides.NewOverrides(t.cfg.LimitsConfig) if err != nil { @@ -104,7 +119,7 @@ func (t *App) initOverrides() (services.Service, error) { func (t *App) initDistributor() (services.Service, error) { // todo: make ingester client a module instead of passing the config everywhere - distributor, err := distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring, t.overrides, t.TracesConsumerMiddleware, t.cfg.Server.LogLevel, t.cfg.SearchEnabled, prometheus.DefaultRegisterer) + distributor, err := distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.ring, t.cfg.GeneratorClient, t.generatorRing, t.overrides, t.TracesConsumerMiddleware, t.cfg.Server.LogLevel, t.cfg.SearchEnabled, t.cfg.MetricsGeneratorEnabled, prometheus.DefaultRegisterer) if err != nil { return nil, fmt.Errorf("failed to create distributor %w", err) } @@ -132,6 +147,18 @@ func (t *App) initIngester() (services.Service, error) { return t.ingester, nil } +func (t *App) initGenerator() (services.Service, error) { + t.cfg.Generator.Ring.ListenPort = t.cfg.Server.GRPCListenPort + generator, err := generator.New(&t.cfg.Generator, t.overrides, prometheus.DefaultRegisterer) + if err != nil { + return nil, fmt.Errorf("failed to create metrics-generator %w", err) + } + 
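+	// (Editorial note: registering the generator on the shared gRPC server below is what
+	// lets distributors reach it through tempopb.MetricsGeneratorClient, as wired up in
+	// modules/distributor/distributor.go later in this diff.)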
t.generator = generator
+
+	tempopb.RegisterMetricsGeneratorServer(t.Server.GRPC, t.generator)
+	return t.generator, nil
+}
+
 func (t *App) initQuerier() (services.Service, error) {
 	// validate worker config
 	// if we're not in single binary mode and worker address is not specified - bail
@@ -268,6 +295,7 @@ func (t *App) initMemberlistKV() (services.Service, error) {
 	t.MemberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, log.Logger, dnsProvider, reg)
 
 	t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
+	t.cfg.Generator.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
 	t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
 	t.cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
@@ -282,12 +310,14 @@ func (t *App) setupModuleManager() error {
 	mm.RegisterModule(Server, t.initServer, modules.UserInvisibleModule)
 	mm.RegisterModule(MemberlistKV, t.initMemberlistKV, modules.UserInvisibleModule)
 	mm.RegisterModule(Ring, t.initRing, modules.UserInvisibleModule)
+	mm.RegisterModule(MetricsGeneratorRing, t.initGeneratorRing, modules.UserInvisibleModule)
 	mm.RegisterModule(Overrides, t.initOverrides, modules.UserInvisibleModule)
 	mm.RegisterModule(Distributor, t.initDistributor)
 	mm.RegisterModule(Ingester, t.initIngester)
 	mm.RegisterModule(Querier, t.initQuerier)
 	mm.RegisterModule(QueryFrontend, t.initQueryFrontend)
 	mm.RegisterModule(Compactor, t.initCompactor)
+	mm.RegisterModule(MetricsGenerator, t.initGenerator)
 	mm.RegisterModule(Store, t.initStore, modules.UserInvisibleModule)
 	mm.RegisterModule(SingleBinary, nil)
 	mm.RegisterModule(ScalableSingleBinary, nil)
@@ -299,14 +329,23 @@ func (t *App) setupModuleManager() error {
 		MemberlistKV:         {Server},
 		QueryFrontend:        {Store, Server},
 		Ring:                 {Server, MemberlistKV},
+		MetricsGeneratorRing: {Server, MemberlistKV},
 		Distributor:          {Ring, Server, Overrides},
 		Ingester:             {Store, Server, Overrides, MemberlistKV},
+		MetricsGenerator:     {Server, Overrides, MemberlistKV},
 		Querier:              {Store, Ring, Overrides},
 		Compactor:            {Store, Server, Overrides, MemberlistKV},
 		SingleBinary:         {Compactor, QueryFrontend, Querier, Ingester, Distributor},
 		ScalableSingleBinary: {SingleBinary},
 	}
 
+	if t.cfg.MetricsGeneratorEnabled {
+		// If the metrics-generator is enabled, the distributor needs the metrics-generator ring
+		deps[Distributor] = append(deps[Distributor], MetricsGeneratorRing)
+		// Add the metrics-generator as a dependency for when the target is {,scalable-}single-binary
+		deps[SingleBinary] = append(deps[SingleBinary], MetricsGenerator)
+	}
+
 	for mod, targets := range deps {
 		if err := mm.AddDependency(mod, targets...); err != nil {
 			return err
diff --git a/cmd/tempo/main.go b/cmd/tempo/main.go
index c2f9f38dde6..ad3ae2fd8f8 100644
--- a/cmd/tempo/main.go
+++ b/cmd/tempo/main.go
@@ -178,6 +178,10 @@ func loadConfig() (*app.Config, error) {
 		config.Ingester.LifecyclerConfig.RingConfig.KVStore.Store = "inmemory"
 		config.Ingester.LifecyclerConfig.RingConfig.ReplicationFactor = 1
 		config.Ingester.LifecyclerConfig.Addr = "127.0.0.1"
+
+		// Generator's ring
+		config.Generator.Ring.KVStore.Store = "inmemory"
+		config.Generator.Ring.InstanceAddr = "127.0.0.1"
 	}
 
 	return config, nil
diff --git a/example/docker-compose/distributed/docker-compose.yaml b/example/docker-compose/distributed/docker-compose.yaml
index 76446290f93..8c42c00cc37 100644
--- a/example/docker-compose/distributed/docker-compose.yaml
+++ b/example/docker-compose/distributed/docker-compose.yaml
@@ -63,6 +63,15 @@ services:
ports: - "3200" # tempo + metrics_generator: + image: tempo:latest + command: "-target=metrics-generator -config.file=/etc/tempo.yaml" + restart: always + volumes: + - ./tempo-distributed.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + minio: image: minio/minio:latest environment: @@ -85,19 +94,22 @@ services: prometheus: image: prom/prometheus:latest - command: [ "--config.file=/etc/prometheus.yaml" ] + command: + - --config.file=/etc/prometheus.yaml + - --web.enable-remote-write-receiver volumes: - ./prometheus.yaml:/etc/prometheus.yaml ports: - "9090:9090" grafana: - image: grafana/grafana:8.1.6 + image: grafana/grafana:8.3.6 volumes: - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml environment: - GF_AUTH_ANONYMOUS_ENABLED=true - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - GF_AUTH_DISABLE_LOGIN_FORM=true + - GF_FEATURE_TOGGLES_ENABLE=tempoSearch tempoBackendSearch tempoServiceGraph ports: - "3000:3000" diff --git a/example/docker-compose/distributed/grafana-datasources.yaml b/example/docker-compose/distributed/grafana-datasources.yaml index 7d5fe5be350..94e84255b3f 100644 --- a/example/docker-compose/distributed/grafana-datasources.yaml +++ b/example/docker-compose/distributed/grafana-datasources.yaml @@ -10,6 +10,7 @@ datasources: isDefault: false version: 1 editable: false + uid: prometheus - name: Tempo type: tempo access: proxy @@ -20,4 +21,7 @@ datasources: version: 1 editable: false apiVersion: 1 + jsonData: + serviceMap: + datasourceUid: prometheus uid: tempo diff --git a/example/docker-compose/distributed/prometheus.yaml b/example/docker-compose/distributed/prometheus.yaml index 27f7c6f5405..439e48ce649 100644 --- a/example/docker-compose/distributed/prometheus.yaml +++ b/example/docker-compose/distributed/prometheus.yaml @@ -16,3 +16,4 @@ scrape_configs: - 'ingester-2:3200' - 'querier:3200' - 'query-frontend:3200' + - 'metrics-generator:3200' diff --git a/example/docker-compose/distributed/tempo-distributed.yaml b/example/docker-compose/distributed/tempo-distributed.yaml index 1f278f78fb2..f4cecfacba1 100644 --- a/example/docker-compose/distributed/tempo-distributed.yaml +++ b/example/docker-compose/distributed/tempo-distributed.yaml @@ -1,4 +1,5 @@ search_enabled: true +metrics_generator_enabled: true server: http_listen_port: 3200 @@ -46,6 +47,12 @@ querier: frontend_worker: frontend_address: query-frontend:9095 +metrics_generator: + remote_write: + enabled: true + client: + url: http://prometheus:9090/api/v1/write + storage: trace: backend: s3 @@ -63,3 +70,6 @@ storage: pool: max_workers: 100 # worker pool determines the number of parallel requests to the object store backend queue_depth: 10000 + +overrides: + metrics_generator_processors: ['service-graphs', 'span-metrics'] diff --git a/go.mod b/go.mod index e0906d8378e..bc1802bd929 100644 --- a/go.mod +++ b/go.mod @@ -92,6 +92,7 @@ require ( cloud.google.com/go v0.99.0 // indirect github.com/Shopify/sarama v1.30.1 // indirect github.com/VividCortex/gohistogram v1.0.0 // indirect + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect github.com/apache/thrift v0.15.0 // indirect github.com/armon/go-metrics v0.3.9 // indirect @@ -146,6 +147,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect 
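+// (Editorial note: the new indirect requirements in this file, e.g. prometheus/common/sigv4,
+// jpillora/backoff and mwitkow/go-conntrack, are presumably pulled in by the Prometheus
+// remote-write client that the metrics-generator uses.)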
github.com/klauspost/cpuid v1.3.1 // indirect github.com/knadh/koanf v1.3.3 // indirect @@ -165,6 +167,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.1.15 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/oklog/run v1.1.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.41.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.41.0 // indirect @@ -176,6 +179,7 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect diff --git a/go.sum b/go.sum index 553adc2fc42..5cabd626014 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v58.3.0+incompatible h1:lb9OWePNuJMiibdxg9XvdbiOldR0Yifge37L4LoOxIs= github.com/Azure/azure-sdk-for-go v58.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= @@ -87,7 +88,9 @@ github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8K github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= @@ -114,6 +117,7 @@ github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= 
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -329,6 +333,7 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -451,6 +456,7 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= +github.com/digitalocean/godo v1.71.0 h1:a4UZCG1kr8eQ3MmsGoPzcAwkEtJG2Lc7eelzEkfZwtA= github.com/digitalocean/godo v1.71.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= @@ -458,16 +464,20 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.10+incompatible h1:GKkP0T7U4ks6X3lmmHKC2QDprnpRJor2Z5a8m62R9ZM= github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod 
h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -486,6 +496,7 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/efficientgo/e2e v0.11.2-0.20211027134903-67d538984a47/go.mod h1:vDnF4AAEZmO0mvyFIATeDJPFaSRM7ywaOnKd61zaSoE= github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= @@ -570,6 +581,7 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.0.0 h1:kH951GinvFVaQgy/ki/B3YYmQtRpExGigSJg6O8z5jo= github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -670,6 +682,7 @@ github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3yg github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis/v8 v8.11.0 h1:O1Td0mQ8UFChQ3N9zFQqo6kTU2cJ+/it88gDB+zg0wo= github.com/go-redis/redis/v8 v8.11.0/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -679,6 +692,7 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= +github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs 
v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -802,8 +816,10 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -843,7 +859,9 @@ github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gophercloud/gophercloud v0.23.0 h1:I3P10oKlGu3DHP9PrEWMr1ya+/+3Rc9uRHNkRZ9wO7g= github.com/gophercloud/gophercloud v0.23.0/go.mod h1:MRw6uyLj8uCGbIvBlqL7QW67t0QtNZnzydUzewo1Ioc= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= @@ -966,6 +984,7 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d h1:W+SIwDdl3+jXWeidYySAgzytE3piq6GumXeBjFBG67c= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hetznercloud/hcloud-go v1.33.1 h1:W1HdO2bRLTKU4WsyqAasDSpt54fYO4WNckWYfH5AuCQ= github.com/hetznercloud/hcloud-go v1.33.1/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -981,6 +1000,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap 
v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -1079,6 +1099,7 @@ github.com/knadh/koanf v1.3.2/go.mod h1:HZ7HMLIGbrWJUfgtEzfHvzR/rX+eIqQlBNPRr4Vt github.com/knadh/koanf v1.3.3 h1:eNtBOzQDzkzIIPRCJCx/Ha3DeD/ZFwCAp8JxyqoVAls= github.com/knadh/koanf v1.3.3/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFtnBXsd1vPOZaLsESovai349994nHx3e0= +github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1107,6 +1128,7 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linode/linodego v1.2.1 h1:v0vS/n9dGMA9evG+fhLJcy7hsf6TUVmacfYiYzARhts= github.com/linode/linodego v1.2.1/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= @@ -1337,8 +1359,10 @@ github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1516,6 +1540,7 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0 github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod 
h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -2123,6 +2148,7 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2392,6 +2418,7 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -2431,14 +2458,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck= k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= @@ -2448,14 +2478,17 @@ k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo 
v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.20.0 h1:tlyxlSvd63k7axjhuchckaRJm+a92z5GSOrTOQY5sHw= k8s.io/klog/v2 v2.20.0/go.mod h1:Gm8eSIfQN6457haJuPaMxZw4wyP5k+ykPFlrhQDvhvw= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= @@ -2465,7 +2498,9 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/modules/distributor/distributor.go b/modules/distributor/distributor.go index 89c375805d6..fc32231f471 100644 --- a/modules/distributor/distributor.go +++ b/modules/distributor/distributor.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" "github.com/grafana/tempo/modules/distributor/receiver" + generator_client "github.com/grafana/tempo/modules/generator/client" ingester_client "github.com/grafana/tempo/modules/ingester/client" "github.com/grafana/tempo/modules/overrides" _ "github.com/grafana/tempo/pkg/gogocodec" // force gogo codec registration @@ -63,6 +64,16 @@ var ( Name: "distributor_ingester_append_failures_total", Help: "The total number of failed batch appends sent to ingesters.", }, []string{"ingester"}) + metricGeneratorPushes = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "distributor_metrics_generator_pushes_total", + Help: "The total number of span pushes sent to metrics-generators.", + }, []string{"metrics_generator"}) + metricGeneratorPushesFailures = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "distributor_metrics_generator_pushes_failures_total", + Help: "The total number of failed span pushes sent to metrics-generators.", + }, []string{"metrics_generator"}) 
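+	// (Editorial, illustrative: together, the two counters above yield a per-generator
+	// failure ratio in PromQL, assuming the "tempo" namespace prefix applied here:
+	//
+	//   rate(tempo_distributor_metrics_generator_pushes_failures_total[5m])
+	//     / rate(tempo_distributor_metrics_generator_pushes_total[5m])
+	// )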
metricSpansIngested = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "tempo", Name: "distributor_spans_received_total", @@ -84,6 +95,11 @@ var ( Name: "distributor_ingester_clients", Help: "The current number of ingester clients.", }) + metricGeneratorClients = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "tempo", + Name: "distributor_metrics_generator_clients", + Help: "The current number of metrics-generator clients.", + }) metricDiscardedSpans = promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "tempo", Name: "discarded_spans_total", @@ -115,6 +131,12 @@ type Distributor struct { searchEnabled bool globalTagsToDrop map[string]struct{} + // metrics-generator + metricsGeneratorEnabled bool + generatorClientCfg generator_client.Config + generatorsRing ring.ReadRing + generatorsPool *ring_client.Pool + // Per-user rate limiter. ingestionRateLimiter *limiter.RateLimiter @@ -124,7 +146,7 @@ type Distributor struct { } // New a distributor creates. -func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRing, o *overrides.Overrides, middleware receiver.Middleware, level logging.Level, searchEnabled bool, reg prometheus.Registerer) (*Distributor, error) { +func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRing, generatorClientCfg generator_client.Config, generatorsRing ring.ReadRing, o *overrides.Overrides, middleware receiver.Middleware, loggingLevel logging.Level, searchEnabled bool, metricsGeneratorEnabled bool, reg prometheus.Registerer) (*Distributor, error) { factory := cfg.factory if factory == nil { factory = func(addr string) (ring_client.PoolClient, error) { @@ -166,6 +188,22 @@ func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRi subservices = append(subservices, pool) + var generatorsPool *ring_client.Pool + if metricsGeneratorEnabled { + generatorsPool = ring_client.NewPool( + "distributor_metrics_generator_pool", + generatorClientCfg.PoolConfig, + ring_client.NewRingServiceDiscovery(generatorsRing), + func(addr string) (ring_client.PoolClient, error) { + return generator_client.New(addr, generatorClientCfg) + }, + metricGeneratorClients, + log.Logger, + ) + + subservices = append(subservices, generatorsPool) + } + // turn list into map for efficient checking tagsToDrop := map[string]struct{}{} for _, tag := range cfg.SearchTagsDenyList { @@ -173,16 +211,20 @@ func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRi } d := &Distributor{ - cfg: cfg, - clientCfg: clientCfg, - ingestersRing: ingestersRing, - pool: pool, - DistributorRing: distributorRing, - ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), - searchEnabled: searchEnabled, - globalTagsToDrop: tagsToDrop, - overrides: o, - traceEncoder: model.MustNewSegmentDecoder(model.CurrentEncoding), + cfg: cfg, + clientCfg: clientCfg, + ingestersRing: ingestersRing, + pool: pool, + DistributorRing: distributorRing, + ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), + searchEnabled: searchEnabled, + metricsGeneratorEnabled: metricsGeneratorEnabled, + generatorClientCfg: generatorClientCfg, + generatorsRing: generatorsRing, + generatorsPool: generatorsPool, + globalTagsToDrop: tagsToDrop, + overrides: o, + traceEncoder: model.MustNewSegmentDecoder(model.CurrentEncoding), } cfgReceivers := cfg.Receivers @@ -190,7 +232,7 @@ func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRi cfgReceivers = defaultReceivers } - 
receivers, err := receiver.New(cfgReceivers, d, middleware, level)
+	receivers, err := receiver.New(cfgReceivers, d, middleware, loggingLevel)
 	if err != nil {
 		return nil, err
 	}
@@ -300,6 +342,17 @@ func (d *Distributor) PushBatches(ctx context.Context, batches []*v1.ResourceSpa
 		recordDiscaredSpans(err, userID, spanCount)
 	}
 
+	if d.metricsGeneratorEnabled && len(d.overrides.MetricsGeneratorProcessors(userID)) > 0 && err == nil {
+		// Handle requests sent to the metrics-generator in a separate goroutine; this way we
+		// don't affect the overall write path
+		go func() {
+			genErr := d.sendToGenerators(context.Background(), userID, keys, rebatchedTraces)
+			if genErr != nil {
+				level.Error(log.Logger).Log("msg", "pushing to metrics-generators failed", "err", genErr)
+			}
+		}()
+	}
+
 	return nil, err // PushRequest is ignored, so no reason to create one
 }
 
@@ -356,6 +409,38 @@ func (d *Distributor) sendToIngestersViaBytes(ctx context.Context, userID string
 	return err
 }
 
+func (d *Distributor) sendToGenerators(ctx context.Context, userID string, keys []uint32, traces []*rebatchedTrace) error {
+	// If an instance is unhealthy, write to the next one (i.e. write extension is enabled)
+	op := ring.Write
+
+	err := ring.DoBatch(ctx, op, d.generatorsRing, keys, func(generator ring.InstanceDesc, indexes []int) error {
+		localCtx, cancel := context.WithTimeout(ctx, d.generatorClientCfg.RemoteTimeout)
+		defer cancel()
+		localCtx = user.InjectOrgID(localCtx, userID)
+
+		req := tempopb.PushSpansRequest{
+			Batches: nil,
+		}
+		for _, j := range indexes {
+			req.Batches = append(req.Batches, traces[j].trace.Batches...)
+		}
+
+		c, err := d.generatorsPool.GetClientFor(generator.Addr)
+		if err != nil {
+			return err
+		}
+
+		_, err = c.(tempopb.MetricsGeneratorClient).PushSpans(localCtx, &req)
+		metricGeneratorPushes.WithLabelValues(generator.Addr).Inc()
+		if err != nil {
+			metricGeneratorPushesFailures.WithLabelValues(generator.Addr).Inc()
+		}
+		return err
+	}, func() {})
+
+	return err
+}
+
 // Check implements the grpc healthcheck
 func (*Distributor) Check(_ context.Context, _ *grpc_health_v1.HealthCheckRequest) (*grpc_health_v1.HealthCheckResponse, error) {
 	return &grpc_health_v1.HealthCheckResponse{Status: grpc_health_v1.HealthCheckResponse_SERVING}, nil
diff --git a/modules/distributor/distributor_test.go b/modules/distributor/distributor_test.go
index a739a042118..d003694da76 100644
--- a/modules/distributor/distributor_test.go
+++ b/modules/distributor/distributor_test.go
@@ -9,15 +9,11 @@ import (
 	"time"
 
 	"github.com/gogo/status"
+	"github.com/golang/protobuf/proto"
 	"github.com/grafana/dskit/flagext"
 	"github.com/grafana/dskit/kv"
 	"github.com/grafana/dskit/ring"
 	ring_client "github.com/grafana/dskit/ring/client"
-	v1_common "github.com/grafana/tempo/pkg/tempopb/common/v1"
-	v1_resource "github.com/grafana/tempo/pkg/tempopb/resource/v1"
-	v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1"
-
-	"github.com/golang/protobuf/proto"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -28,9 +24,13 @@ import (
 	"google.golang.org/grpc/health/grpc_health_v1"
 
 	"github.com/grafana/tempo/modules/distributor/receiver"
+	generator_client "github.com/grafana/tempo/modules/generator/client"
 	ingester_client "github.com/grafana/tempo/modules/ingester/client"
 	"github.com/grafana/tempo/modules/overrides"
 	"github.com/grafana/tempo/pkg/tempopb"
+	v1_common "github.com/grafana/tempo/pkg/tempopb/common/v1"
+	v1_resource "github.com/grafana/tempo/pkg/tempopb/resource/v1"
+	v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1"
 	"github.com/grafana/tempo/pkg/util"
 	"github.com/grafana/tempo/pkg/util/test"
 )
@@ -695,7 +695,7 @@ func prepare(t *testing.T, limits *overrides.Limits, kvStore kv.Client) *Distrib
 	l := logging.Level{}
 	_ = l.Set("error")
 	mw := receiver.MultiTenancyMiddleware()
-	d, err := New(distributorConfig, clientConfig, ingestersRing, overrides, mw, l, false, prometheus.NewPedanticRegistry())
+	d, err := New(distributorConfig, clientConfig, ingestersRing, generator_client.Config{}, nil, overrides, mw, l, false, false, prometheus.NewPedanticRegistry())
 	require.NoError(t, err)
 
 	return d
diff --git a/modules/generator/client/client.go b/modules/generator/client/client.go
new file mode 100644
index 00000000000..e44d472fe07
--- /dev/null
+++ b/modules/generator/client/client.go
@@ -0,0 +1,73 @@
+package client
+
+import (
+	"flag"
+	"io"
+	"time"
+
+	"github.com/grafana/dskit/grpcclient"
+	ring_client "github.com/grafana/dskit/ring/client"
+	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
+	"github.com/opentracing/opentracing-go"
+	"github.com/weaveworks/common/middleware"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health/grpc_health_v1"
+
+	"github.com/grafana/tempo/pkg/tempopb"
+)
+
+// Config for a generator client.
+type Config struct {
+	PoolConfig       ring_client.PoolConfig `yaml:"pool_config,omitempty"`
+	RemoteTimeout    time.Duration          `yaml:"remote_timeout,omitempty"`
+	GRPCClientConfig grpcclient.Config      `yaml:"grpc_client_config"`
+}
+
+type Client struct {
+	tempopb.MetricsGeneratorClient
+	grpc_health_v1.HealthClient
+	io.Closer
+}
+
+// RegisterFlags registers flags.
+func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+	cfg.GRPCClientConfig.RegisterFlagsWithPrefix("generator.client", f)
+
+	f.DurationVar(&cfg.PoolConfig.HealthCheckTimeout, "generator.client.healthcheck-timeout", 1*time.Second, "Timeout for healthcheck rpcs.")
+	f.DurationVar(&cfg.PoolConfig.CheckInterval, "generator.client.healthcheck-interval", 15*time.Second, "Interval to healthcheck generators.")
+	f.BoolVar(&cfg.PoolConfig.HealthCheckEnabled, "generator.client.healthcheck-enabled", true, "Healthcheck generators.")
+	f.DurationVar(&cfg.RemoteTimeout, "generator.client.timeout", 5*time.Second, "Timeout for generator client RPCs.")
+}
+
+// New returns a new generator client.
+func New(addr string, cfg Config) (*Client, error) {
+	opts := []grpc.DialOption{
+		grpc.WithInsecure(),
+	}
+
+	instrumentationOpts, err := cfg.GRPCClientConfig.DialOption(instrumentation())
+	if err != nil {
+		return nil, err
+	}
+
+	opts = append(opts, instrumentationOpts...)
+	conn, err := grpc.Dial(addr, opts...)
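+	// (Editorial note: the dial above uses grpc.WithInsecure(), i.e. plaintext gRPC from
+	// distributor to generator; compression is configured separately through
+	// GRPCClientConfig, which cmd/tempo/app/app.go defaults to snappy in this diff.)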
+	if err != nil {
+		return nil, err
+	}
+	return &Client{
+		MetricsGeneratorClient: tempopb.NewMetricsGeneratorClient(conn),
+		HealthClient:           grpc_health_v1.NewHealthClient(conn),
+		Closer:                 conn,
+	}, nil
+}
+
+func instrumentation() ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
+	return []grpc.UnaryClientInterceptor{
+			otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
+			middleware.ClientUserHeaderInterceptor,
+		}, []grpc.StreamClientInterceptor{
+			otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()),
+			middleware.StreamClientUserHeaderInterceptor,
+		}
+}
diff --git a/modules/generator/config.go b/modules/generator/config.go
new file mode 100644
index 00000000000..4df97116a82
--- /dev/null
+++ b/modules/generator/config.go
@@ -0,0 +1,59 @@
+package generator
+
+import (
+	"flag"
+	"time"
+
+	"github.com/grafana/tempo/modules/generator/processor/servicegraphs"
+	"github.com/grafana/tempo/modules/generator/processor/spanmetrics"
+	"github.com/grafana/tempo/modules/generator/remotewrite"
+)
+
+const (
+	// RingKey is the key under which we store the metrics-generator's ring in the KVStore.
+	RingKey = "metrics-generator"
+
+	// ringNameForServer is the name of the ring used by the metrics-generator server.
+	ringNameForServer = "metrics-generator"
+)
+
+// Config for a generator.
+type Config struct {
+	Ring RingConfig `yaml:"ring"`
+
+	Processor ProcessorConfig `yaml:"processor"`
+
+	// CollectionInterval controls how often to collect and remote write metrics.
+	// Defaults to 15s.
+	CollectionInterval time.Duration `yaml:"collection_interval"`
+
+	// ExternalLabels are added to any time-series exported by this instance.
+	ExternalLabels map[string]string `yaml:"external_labels,omitempty"`
+
+	// Add a label `tempo_instance_id` to every metric. This is necessary when running multiple
+	// instances of the metrics-generator as each instance will push the same time series.
+	AddInstanceIDLabel bool `yaml:"add_instance_id_label"`
+
+	RemoteWrite remotewrite.Config `yaml:"remote_write,omitempty"`
+}
+
+// RegisterFlagsAndApplyDefaults registers the flags.
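+// (Editorial sketch of the resulting YAML, based on the struct tags above, the defaults set
+// below, and the example tempo-distributed.yaml in this diff; `cluster: prod` is a made-up
+// external label:
+//
+//	metrics_generator:
+//	  collection_interval: 15s
+//	  add_instance_id_label: true
+//	  external_labels:
+//	    cluster: prod
+//	  remote_write:
+//	    enabled: true
+//	    client:
+//	      url: http://prometheus:9090/api/v1/write
+// )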
+func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
+	cfg.Ring.RegisterFlagsAndApplyDefaults(prefix, f)
+	cfg.Processor.RegisterFlagsAndApplyDefaults(prefix, f)
+
+	cfg.CollectionInterval = 15 * time.Second
+	cfg.AddInstanceIDLabel = true
+
+	cfg.RemoteWrite.RegisterFlagsAndApplyDefaults(prefix, f)
+}
+
+type ProcessorConfig struct {
+	ServiceGraphs servicegraphs.Config `yaml:"service_graphs"`
+	SpanMetrics   spanmetrics.Config   `yaml:"span_metrics"`
+}
+
+func (cfg *ProcessorConfig) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
+	cfg.ServiceGraphs.RegisterFlagsAndApplyDefaults(prefix, f)
+	cfg.SpanMetrics.RegisterFlagsAndApplyDefaults(prefix, f)
+}
diff --git a/modules/generator/generator.go b/modules/generator/generator.go
new file mode 100644
index 00000000000..df5190ccec0
--- /dev/null
+++ b/modules/generator/generator.go
@@ -0,0 +1,305 @@
+package generator
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log/level"
+	"github.com/grafana/dskit/kv"
+	"github.com/grafana/dskit/ring"
+	"github.com/grafana/dskit/services"
+	"github.com/opentracing/opentracing-go"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/weaveworks/common/user"
+	"go.uber.org/atomic"
+
+	"github.com/grafana/tempo/modules/generator/remotewrite"
+	"github.com/grafana/tempo/pkg/tempopb"
+	"github.com/grafana/tempo/pkg/util/log"
+)
+
+const (
+	// ringAutoForgetUnhealthyPeriods is the number of consecutive timeout periods after which
+	// an unhealthy instance in the ring is automatically removed.
+	ringAutoForgetUnhealthyPeriods = 2
+
+	// We use a safe default instead of exposing a config option to the user
+	// in order to simplify the config.
+	ringNumTokens = 256
+)
+
+var ErrReadOnly = errors.New("metrics-generator is shutting down")
+
+type AppendableFactory func(userID string) storage.Appendable
+
+type Generator struct {
+	services.Service
+
+	cfg       *Config
+	overrides metricsGeneratorOverrides
+
+	ringLifecycler *ring.BasicLifecycler
+	ring           *ring.Ring
+
+	instancesMtx sync.RWMutex
+	instances    map[string]*instance
+
+	appendableFactory AppendableFactory
+
+	subservices        *services.Manager
+	subservicesWatcher *services.FailureWatcher
+
+	// When set to true, the generator will refuse incoming pushes
+	// and will flush any remaining metrics.
+	readOnly atomic.Bool
+}
+
+// New makes a new Generator.
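+// (Editorial usage sketch, mirroring how cmd/tempo/app/modules.go constructs it earlier in
+// this diff; srv stands in for the app's server:
+//
+//	g, err := generator.New(&cfg.Generator, overrides, prometheus.DefaultRegisterer)
+//	if err != nil {
+//		return nil, fmt.Errorf("failed to create metrics-generator %w", err)
+//	}
+//	tempopb.RegisterMetricsGeneratorServer(srv.GRPC, g)
+// )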
+// New makes a new Generator.
+func New(cfg *Config, overrides metricsGeneratorOverrides, reg prometheus.Registerer) (*Generator, error) {
+	if cfg.RemoteWrite.Enabled && cfg.RemoteWrite.Client.URL == nil {
+		return nil, errors.New("remote-write enabled but client URL is not configured")
+	}
+
+	if cfg.AddInstanceIDLabel {
+		if cfg.ExternalLabels == nil {
+			cfg.ExternalLabels = make(map[string]string)
+		}
+		cfg.ExternalLabels["tempo_instance_id"] = cfg.Ring.InstanceID
+	}
+
+	g := &Generator{
+		cfg:       cfg,
+		overrides: overrides,
+
+		instances: map[string]*instance{},
+	}
+
+	// Lifecycler and ring
+	ringStore, err := kv.NewClient(
+		cfg.Ring.KVStore,
+		ring.GetCodec(),
+		kv.RegistererWithKVName(prometheus.WrapRegistererWithPrefix("cortex_", reg), "metrics-generator"),
+		log.Logger,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("create KV store client: %w", err)
+	}
+
+	lifecyclerCfg, err := cfg.Ring.toLifecyclerConfig()
+	if err != nil {
+		return nil, fmt.Errorf("invalid ring lifecycler config: %w", err)
+	}
+
+	// Define lifecycler delegates in reverse order (last to be called defined first because they're
+	// chained via "next delegate").
+	delegate := ring.BasicLifecyclerDelegate(g)
+	delegate = ring.NewLeaveOnStoppingDelegate(delegate, log.Logger)
+	delegate = ring.NewAutoForgetDelegate(ringAutoForgetUnhealthyPeriods*cfg.Ring.HeartbeatTimeout, delegate, log.Logger)
+
+	g.ringLifecycler, err = ring.NewBasicLifecycler(lifecyclerCfg, ringNameForServer, RingKey, ringStore, delegate, log.Logger, prometheus.WrapRegistererWithPrefix("cortex_", reg))
+	if err != nil {
+		return nil, fmt.Errorf("create ring lifecycler: %w", err)
+	}
+
+	ringCfg := cfg.Ring.ToRingConfig()
+	g.ring, err = ring.NewWithStoreClientAndStrategy(ringCfg, ringNameForServer, RingKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy(), prometheus.WrapRegistererWithPrefix("cortex_", reg), log.Logger)
+	if err != nil {
+		return nil, fmt.Errorf("create ring client: %w", err)
+	}
+
+	// Remote write
+	remoteWriteMetrics := remotewrite.NewMetrics(reg)
+	g.appendableFactory = func(userID string) storage.Appendable {
+		return remotewrite.NewAppendable(&cfg.RemoteWrite, log.Logger, userID, remoteWriteMetrics)
+	}
+
+	g.Service = services.NewBasicService(g.starting, g.running, g.stopping)
+	return g, nil
+}
+
+func (g *Generator) starting(ctx context.Context) (err error) {
+	// If this function returns an error we want to unregister the instance from the ring.
+	// We do that by ensuring dependencies are gracefully stopped if they were already started.
+	defer func() {
+		if err == nil || g.subservices == nil {
+			return
+		}
+
+		if stopErr := services.StopManagerAndAwaitStopped(context.Background(), g.subservices); stopErr != nil {
+			level.Error(log.Logger).Log("msg", "failed to gracefully stop metrics-generator dependencies", "err", stopErr)
+		}
+	}()
+
+	g.subservices, err = services.NewManager(g.ringLifecycler, g.ring)
+	if err != nil {
+		return fmt.Errorf("unable to start metrics-generator dependencies: %w", err)
+	}
+	g.subservicesWatcher = services.NewFailureWatcher()
+	g.subservicesWatcher.WatchManager(g.subservices)
+
+	err = services.StartManagerAndAwaitHealthy(ctx, g.subservices)
+	if err != nil {
+		return fmt.Errorf("unable to start metrics-generator dependencies: %w", err)
+	}
+
+	return nil
+}
+
+func (g *Generator) running(ctx context.Context) error {
+	collectMetricsTicker := time.NewTicker(g.cfg.CollectionInterval)
+	defer collectMetricsTicker.Stop()
+
+	for {
+		select {
+		case <-collectMetricsTicker.C:
+			g.collectMetrics()
+
+		case <-ctx.Done():
+			return nil
+
+		case err := <-g.subservicesWatcher.Chan():
+			return fmt.Errorf("metrics-generator subservice failed: %w", err)
+		}
+	}
+}
+
+func (g *Generator) stopping(_ error) error {
+	// Mark as read-only
+	g.stopIncomingRequests()
+
+	if g.subservices != nil {
+		err := services.StopManagerAndAwaitStopped(context.Background(), g.subservices)
+		if err != nil {
+			level.Error(log.Logger).Log("msg", "failed to stop metrics-generator dependencies", "err", err)
+		}
+	}
+
+	// Wait for generator to stop subservices, then shut down instances and flush metrics
+	for id, instance := range g.instances {
+		err := instance.shutdown(context.Background())
+		if err != nil {
+			level.Warn(log.Logger).Log("msg", "shutdown completed with errors", "instanceID", id, "err", err)
+		}
+	}
+
+	return nil
+}
+
+// stopIncomingRequests marks the generator as read-only, refusing push requests
+func (g *Generator) stopIncomingRequests() {
+	g.readOnly.Store(true)
+}
+
+func (g *Generator) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) (*tempopb.PushResponse, error) {
+	if g.readOnly.Load() {
+		return nil, ErrReadOnly
+	}
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "generator.PushSpans")
+	defer span.Finish()
+
+	instanceID, err := user.ExtractOrgID(ctx)
+	if err != nil {
+		return nil, err
+	}
+	span.SetTag("instanceID", instanceID)
+
+	instance, err := g.getOrCreateInstance(instanceID)
+	if err != nil {
+		return nil, err
+	}
+
+	err = instance.pushSpans(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	return &tempopb.PushResponse{}, nil
+}
+
+func (g *Generator) getOrCreateInstance(instanceID string) (*instance, error) {
+	inst, ok := g.getInstanceByID(instanceID)
+	if ok {
+		return inst, nil
+	}
+
+	g.instancesMtx.Lock()
+	defer g.instancesMtx.Unlock()
+	inst, ok = g.instances[instanceID]
+	if !ok {
+		var err error
+		inst, err = newInstance(g.cfg, instanceID, g.overrides, g.appendableFactory(instanceID))
+		if err != nil {
+			return nil, err
+		}
+		g.instances[instanceID] = inst
+	}
+	return inst, nil
+}
+
+func (g *Generator) getInstanceByID(id string) (*instance, bool) {
+	g.instancesMtx.RLock()
+	defer g.instancesMtx.RUnlock()
+
+	inst, ok := g.instances[id]
+	return inst, ok
+}
+
+func (g *Generator) collectMetrics() {
+	ctx, cancel := context.WithTimeout(context.Background(), g.cfg.CollectionInterval)
+	defer cancel()
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "generator.collectMetrics")
+	defer span.Finish()
+
+	for _, instance := range g.instances {
+		err := instance.collectAndPushMetrics(ctx)
+		if err != nil {
+			level.Error(log.Logger).Log("msg", "collecting and pushing metrics failed", "tenant", instance.instanceID, "err", err)
+		}
+	}
+}
+
+func (g *Generator) CheckReady(_ context.Context) error {
+	if !g.ringLifecycler.IsRegistered() {
+		return fmt.Errorf("metrics-generator check ready failed: not registered in the ring")
+	}
+
+	return nil
+}
+
+// OnRingInstanceRegister implements ring.BasicLifecyclerDelegate
+func (g *Generator) OnRingInstanceRegister(lifecycler *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) {
+	// When we initialize the metrics-generator instance in the ring we want to start from
+	// a clean situation, so regardless of the previous state we set it to ACTIVE, while we
+	// keep existing tokens (if any) or the ones loaded from file.
+	var tokens []uint32
+	if instanceExists {
+		tokens = instanceDesc.GetTokens()
+	}
+
+	takenTokens := ringDesc.GetTokens()
+	newTokens := ring.GenerateTokens(ringNumTokens-len(tokens), takenTokens)
+
+	// Token sorting will be enforced by the parent caller.
+	tokens = append(tokens, newTokens...)
+
+	return ring.ACTIVE, tokens
+}
+
+// OnRingInstanceTokens implements ring.BasicLifecyclerDelegate
+func (g *Generator) OnRingInstanceTokens(lifecycler *ring.BasicLifecycler, tokens ring.Tokens) {
+}
+
+// OnRingInstanceStopping implements ring.BasicLifecyclerDelegate
+func (g *Generator) OnRingInstanceStopping(lifecycler *ring.BasicLifecycler) {
+}
+
+// OnRingInstanceHeartbeat implements ring.BasicLifecyclerDelegate
+func (g *Generator) OnRingInstanceHeartbeat(lifecycler *ring.BasicLifecycler, ringDesc *ring.Desc, instanceDesc *ring.InstanceDesc) {
+}
diff --git a/modules/generator/generator_ring.go b/modules/generator/generator_ring.go
new file mode 100644
index 00000000000..188f6a94e72
--- /dev/null
+++ b/modules/generator/generator_ring.go
@@ -0,0 +1,73 @@
+package generator
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/go-kit/log/level"
+	"github.com/grafana/dskit/flagext"
+	"github.com/grafana/dskit/kv"
+	"github.com/grafana/dskit/ring"
+
+	"github.com/grafana/tempo/pkg/util/log"
+)
+
+type RingConfig struct {
+	KVStore          kv.Config     `yaml:"kvstore"`
+	HeartbeatPeriod  time.Duration `yaml:"heartbeat_period"`
+	HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"`
+
+	InstanceID             string   `yaml:"instance_id"`
+	InstanceInterfaceNames []string `yaml:"instance_interface_names"`
+	InstanceAddr           string   `yaml:"instance_addr"`
+
+	// Injected internally
+	ListenPort int `yaml:"-"`
+}
+
+func (cfg *RingConfig) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
+	cfg.KVStore.RegisterFlagsWithPrefix(prefix, "collectors/", f)
+	cfg.KVStore.Store = "memberlist"
+
+	cfg.HeartbeatPeriod = 5 * time.Second
+	cfg.HeartbeatTimeout = 1 * time.Minute
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		level.Error(log.Logger).Log("msg", "failed to get hostname", "err", err)
+		os.Exit(1)
+	}
+	cfg.InstanceID = hostname
+	cfg.InstanceInterfaceNames = []string{"eth0", "en0"}
+}
+
+func (cfg *RingConfig) ToRingConfig() ring.Config {
+	rc := ring.Config{}
+	flagext.DefaultValues(&rc)
+
+	rc.KVStore = cfg.KVStore
+	rc.HeartbeatTimeout = cfg.HeartbeatTimeout
+	rc.ReplicationFactor = 1
+	rc.SubringCacheDisabled = true
+
+	return rc
+}
+
+func (cfg *RingConfig) toLifecyclerConfig() (ring.BasicLifecyclerConfig, error) {
+	instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, 
cfg.InstanceInterfaceNames, log.Logger) + if err != nil { + level.Error(log.Logger).Log("msg", "failed to get instance address", "err", err) + return ring.BasicLifecyclerConfig{}, err + } + + instancePort := cfg.ListenPort + + return ring.BasicLifecyclerConfig{ + ID: cfg.InstanceID, + Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), + HeartbeatPeriod: cfg.HeartbeatPeriod, + NumTokens: ringNumTokens, + }, nil +} diff --git a/modules/generator/generator_test.go b/modules/generator/generator_test.go new file mode 100644 index 00000000000..5983021b3db --- /dev/null +++ b/modules/generator/generator_test.go @@ -0,0 +1,217 @@ +package generator + +import ( + "context" + "flag" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" + "time" + + gokitlog "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/kv/consul" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + prometheus_common_config "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + prometheus_config "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/storage/remote" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaveworks/common/user" + + "github.com/grafana/tempo/modules/overrides" + "github.com/grafana/tempo/pkg/tempopb" + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/pkg/util" + "github.com/grafana/tempo/pkg/util/log" + "github.com/grafana/tempo/pkg/util/test" +) + +const localhost = "localhost" + +type metric struct { + name string + val float64 +} + +func TestGenerator(t *testing.T) { + // logs will be useful to debug problems + // TODO pass the logger as a parameter to generator.New instead of overriding a global variable + log.Logger = gokitlog.NewLogfmtLogger(gokitlog.NewSyncWriter(os.Stdout)) + + rwServer, doneCh := remoteWriteServer(t, expectedMetrics) + defer rwServer.Close() + + cfg := &Config{} + cfg.RegisterFlagsAndApplyDefaults("", &flag.FlagSet{}) + + // Ring + mockStore, _ := consul.NewInMemoryClient(ring.GetCodec(), gokitlog.NewNopLogger(), nil) + + cfg.Ring.KVStore.Mock = mockStore + cfg.Ring.ListenPort = 0 + cfg.Ring.InstanceID = localhost + cfg.Ring.InstanceAddr = localhost + + // Overrides + limitsTestConfig := defaultLimitsTestConfig() + limitsTestConfig.MetricsGeneratorProcessors = map[string]struct{}{"service-graphs": {}, "span-metrics": {}} + limits, err := overrides.NewOverrides(limitsTestConfig) + require.NoError(t, err, "unexpected error creating overrides") + + // Remote write + url, err := url.Parse(fmt.Sprintf("http://%s/receive", rwServer.Listener.Addr().String())) + require.NoError(t, err) + cfg.RemoteWrite.Enabled = true + cfg.RemoteWrite.Client = prometheus_config.DefaultRemoteWriteConfig + cfg.RemoteWrite.Client.URL = &prometheus_common_config.URL{URL: url} + + generator, err := New(cfg, limits, prometheus.NewRegistry()) + require.NoError(t, err, "unexpected error creating generator") + + err = generator.starting(context.Background()) + require.NoError(t, err, "unexpected error starting generator") + + // Send some spans + req := test.MakeBatch(10, nil) + ctx := user.InjectOrgID(context.Background(), util.FakeTenantID) + _, err = generator.PushSpans(ctx, &tempopb.PushSpansRequest{Batches: []*v1.ResourceSpans{req}}) + require.NoError(t, err, "unexpected error pushing spans") + + generator.collectMetrics() 
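	// Editorial note, not part of this change: collectMetrics gathers samples from every
	// tenant instance and remote-writes them to the httptest server above; the select
	// below waits for remoteWriteServer to assert on expectedMetrics and close doneCh.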
+ + select { + case <-doneCh: + case <-time.After(time.Second * 5): + t.Fatal("timeout while waiting for remote write server to receive spans") + } +} + +func TestGenerator_shutdown(t *testing.T) { + // logs will be useful to debug problems + // TODO pass the logger as a parameter to generator.New instead of overriding a global variable + log.Logger = gokitlog.NewLogfmtLogger(gokitlog.NewSyncWriter(os.Stdout)) + + rwServer, doneCh := remoteWriteServer(t, expectedMetrics) + defer rwServer.Close() + + cfg := &Config{} + cfg.RegisterFlagsAndApplyDefaults("", &flag.FlagSet{}) + + // Ring + mockStore, _ := consul.NewInMemoryClient(ring.GetCodec(), gokitlog.NewNopLogger(), nil) + + cfg.Ring.KVStore.Mock = mockStore + cfg.Ring.ListenPort = 0 + cfg.Ring.InstanceID = localhost + cfg.Ring.InstanceAddr = localhost + + // Overrides + limitsTestConfig := defaultLimitsTestConfig() + limitsTestConfig.MetricsGeneratorProcessors = map[string]struct{}{"service-graphs": {}, "span-metrics": {}} + limits, err := overrides.NewOverrides(limitsTestConfig) + require.NoError(t, err, "unexpected error creating overrides") + + // Remote write + url, err := url.Parse(fmt.Sprintf("http://%s/receive", rwServer.Listener.Addr().String())) + require.NoError(t, err) + cfg.RemoteWrite.Enabled = true + cfg.RemoteWrite.Client = prometheus_config.DefaultRemoteWriteConfig + cfg.RemoteWrite.Client.URL = &prometheus_common_config.URL{URL: url} + + // Set incredibly high collection interval + cfg.CollectionInterval = time.Hour + + generator, err := New(cfg, limits, prometheus.NewRegistry()) + require.NoError(t, err, "unexpected error creating generator") + + err = services.StartAndAwaitRunning(context.Background(), generator) + require.NoError(t, err, "unexpected error starting generator") + + // Send some spans + req := test.MakeBatch(10, nil) + ctx := user.InjectOrgID(context.Background(), util.FakeTenantID) + _, err = generator.PushSpans(ctx, &tempopb.PushSpansRequest{Batches: []*v1.ResourceSpans{req}}) + require.NoError(t, err, "unexpected error pushing spans") + + err = services.StopAndAwaitTerminated(context.Background(), generator) + require.NoError(t, err, "failed to terminate metrics-generator") + + select { + case <-doneCh: + case <-time.After(time.Second * 5): + t.Fatal("timeout while waiting for remote write server to receive spans") + } +} + +func remoteWriteServer(t *testing.T, expected []metric) (*httptest.Server, chan struct{}) { + doneCh := make(chan struct{}) + + mux := http.NewServeMux() + mux.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) { + req, err := remote.DecodeWriteRequest(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + level.Info(log.Logger).Log("msg", "received remote write", "body", req.String()) + + for i, ts := range req.Timeseries { + m := make(model.Metric, len(ts.Labels)) + for _, l := range ts.Labels { + m[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + + // don't bother testing timeseries with exemplars for now, this is already covered by other tests + if len(ts.Exemplars) != 0 { + continue + } + + if i >= len(expected) { + assert.Fail(t, "received unexpected metric", "%s", m.String()) + continue + } + + assert.Equal(t, expected[i].name, m.String()) + + assert.Len(t, ts.Samples, 1) + assert.Equal(t, expected[i].val, ts.Samples[0].Value) + } + close(doneCh) + }) + + return httptest.NewServer(mux), doneCh +} + +var expectedMetrics = []metric{ + {`traces_spanmetrics_calls_total{service="test-service", 
span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, + {`traces_spanmetrics_duration_seconds_count{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, + {`traces_spanmetrics_duration_seconds_sum{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.002", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.004", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.008", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.016", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.032", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.064", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.128", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.256", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="0.512", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 0}, + {`traces_spanmetrics_duration_seconds_bucket{le="1.024", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, + {`traces_spanmetrics_duration_seconds_bucket{le="2.048", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, + {`traces_spanmetrics_duration_seconds_bucket{le="4.096", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, + {`traces_spanmetrics_duration_seconds_bucket{le="+Inf", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", tempo_instance_id="localhost"}`, 10}, +} + +func defaultLimitsTestConfig() overrides.Limits { + limits := overrides.Limits{} + flagext.DefaultValues(&limits) + return limits +} diff --git a/modules/generator/instance.go b/modules/generator/instance.go new file mode 100644 index 00000000000..095a61401ab --- /dev/null +++ b/modules/generator/instance.go @@ -0,0 +1,279 @@ +package generator + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/go-kit/log/level" + "github.com/opentracing/opentracing-go" 
+ "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/prometheus/storage" + "github.com/weaveworks/common/tracing" + + "github.com/grafana/tempo/modules/generator/processor" + "github.com/grafana/tempo/modules/generator/processor/servicegraphs" + "github.com/grafana/tempo/modules/generator/processor/spanmetrics" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/util/log" +) + +var ( + allSupportedProcessors = []string{servicegraphs.Name, spanmetrics.Name} + + metricActiveProcessors = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "tempo", + Name: "metrics_generator_active_processors", + Help: "The active processors per tenant", + }, []string{"tenant", "processor"}) + metricActiveProcessorsUpdateFailed = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_active_processors_update_failed_total", + Help: "The total number of times updating the active processors failed", + }, []string{"tenant"}) + metricSpansIngested = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_spans_received_total", + Help: "The total number of spans received per tenant", + }, []string{"tenant"}) + metricBytesIngested = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_bytes_received_total", + Help: "The total number of proto bytes received per tenant", + }, []string{"tenant"}) +) + +type instance struct { + cfg *Config + + instanceID string + overrides metricsGeneratorOverrides + + registry processor.Registry + appendable storage.Appendable + + // processorsMtx protects the processors map, not the processors itself + processorsMtx sync.RWMutex + processors map[string]processor.Processor + + shutdownCh chan struct{} +} + +func newInstance(cfg *Config, instanceID string, overrides metricsGeneratorOverrides, appendable storage.Appendable) (*instance, error) { + i := &instance{ + cfg: cfg, + instanceID: instanceID, + overrides: overrides, + + registry: processor.NewRegistry(cfg.ExternalLabels), + appendable: appendable, + + processors: make(map[string]processor.Processor), + + shutdownCh: make(chan struct{}, 1), + } + + err := i.updateProcessors(i.overrides.MetricsGeneratorProcessors(i.instanceID)) + if err != nil { + return nil, fmt.Errorf("could not initialize processors: %w", err) + } + go i.watchOverrides() + + return i, nil +} + +func (i *instance) watchOverrides() { + reloadPeriod := 10 * time.Second + + ticker := time.NewTicker(reloadPeriod) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := i.updateProcessors(i.overrides.MetricsGeneratorProcessors(i.instanceID)) + if err != nil { + metricActiveProcessorsUpdateFailed.WithLabelValues(i.instanceID).Inc() + level.Error(log.Logger).Log("msg", "updating the processors failed", "err", err, "tenant", i.instanceID) + } + + case <-i.shutdownCh: + return + } + } +} + +func (i *instance) updateProcessors(desiredProcessors map[string]struct{}) error { + i.processorsMtx.RLock() + toAdd, toRemove := i.diffProcessors(desiredProcessors) + i.processorsMtx.RUnlock() + + if len(toAdd) == 0 && len(toRemove) == 0 { + return nil + } + + i.processorsMtx.Lock() + defer i.processorsMtx.Unlock() + + for _, processorName := range toAdd { + err := i.addProcessor(processorName) + if err != nil { + return err + } + } + for _, processorName := range toRemove { + i.removeProcessor(processorName) + } + + i.updateProcessorMetrics() + 
+	return nil
+}
+
+// diffProcessors compares the existing processors with desiredProcessors. Must be called under a
+// read lock.
+func (i *instance) diffProcessors(desiredProcessors map[string]struct{}) (toAdd []string, toRemove []string) {
+	for processorName := range desiredProcessors {
+		if _, ok := i.processors[processorName]; !ok {
+			toAdd = append(toAdd, processorName)
+		}
+	}
+	for processorName := range i.processors {
+		if _, ok := desiredProcessors[processorName]; !ok {
+			toRemove = append(toRemove, processorName)
+		}
+	}
+	return toAdd, toRemove
+}
+
+// addProcessor registers the processor and adds it to the processors map. Must be called under a
+// write lock.
+func (i *instance) addProcessor(processorName string) error {
+	level.Debug(log.Logger).Log("msg", "adding processor", "processorName", processorName, "tenant", i.instanceID)
+
+	var newProcessor processor.Processor
+	switch processorName {
+	case spanmetrics.Name:
+		newProcessor = spanmetrics.New(i.cfg.Processor.SpanMetrics, i.instanceID)
+	case servicegraphs.Name:
+		newProcessor = servicegraphs.New(i.cfg.Processor.ServiceGraphs, i.instanceID)
+	default:
+		level.Error(log.Logger).Log(
+			"msg", fmt.Sprintf("processor does not exist, supported processors: [%s]", strings.Join(allSupportedProcessors, ", ")),
+			"processorName", processorName,
+			"tenant", i.instanceID,
+		)
+		return fmt.Errorf("unknown processor %s", processorName)
+	}
+
+	// check the processor wasn't added in the meantime
+	if _, ok := i.processors[processorName]; ok {
+		return nil
+	}
+
+	err := newProcessor.RegisterMetrics(i.registry)
+	if err != nil {
+		return fmt.Errorf("error registering metrics for %s: %w", processorName, err)
+	}
+
+	i.processors[processorName] = newProcessor
+
+	return nil
+}
+
+// removeProcessor removes the processor from the processors map and shuts it down. Must be called
+// under a write lock.
+func (i *instance) removeProcessor(processorName string) {
+	level.Debug(log.Logger).Log("msg", "removing processor", "processorName", processorName, "tenant", i.instanceID)
+
+	deletedProcessor, ok := i.processors[processorName]
+	if !ok {
+		return
+	}
+
+	delete(i.processors, processorName)
+
+	err := deletedProcessor.Shutdown(context.Background(), i.registry)
+	if err != nil {
+		level.Error(log.Logger).Log("msg", "processor did not shut down cleanly", "name", deletedProcessor.Name(), "err", err, "tenant", i.instanceID)
+	}
+}
+
+// updateProcessorMetrics updates the active processor metrics. Must be called under a read lock.
+func (i *instance) updateProcessorMetrics() { + for _, processorName := range allSupportedProcessors { + isPresent := 0.0 + if _, ok := i.processors[processorName]; ok { + isPresent = 1.0 + } + metricActiveProcessors.WithLabelValues(i.instanceID, processorName).Set(isPresent) + } +} + +func (i *instance) pushSpans(ctx context.Context, req *tempopb.PushSpansRequest) error { + i.updatePushMetrics(req) + + i.processorsMtx.RLock() + defer i.processorsMtx.RUnlock() + + for _, processor := range i.processors { + if err := processor.PushSpans(ctx, req); err != nil { + return err + } + } + + return nil +} + +func (i *instance) updatePushMetrics(req *tempopb.PushSpansRequest) { + size := 0 + spanCount := 0 + for _, b := range req.Batches { + size += b.Size() + for _, ils := range b.InstrumentationLibrarySpans { + spanCount += len(ils.Spans) + } + } + metricBytesIngested.WithLabelValues(i.instanceID).Add(float64(size)) + metricSpansIngested.WithLabelValues(i.instanceID).Add(float64(spanCount)) +} + +func (i *instance) collectAndPushMetrics(ctx context.Context) error { + span, ctx := opentracing.StartSpanFromContext(ctx, "instance.collectAndPushMetrics") + defer span.Finish() + + traceID, _ := tracing.ExtractTraceID(ctx) + level.Info(log.Logger).Log("msg", "collecting metrics", "tenant", i.instanceID, "traceID", traceID) + + appender := i.appendable.Appender(ctx) + + err := i.registry.Gather(appender) + if err != nil { + return err + } + + return appender.Commit() +} + +// shutdown stops the instance and flushes any remaining data. After shutdown +// is called pushSpans should not be called anymore. +func (i *instance) shutdown(ctx context.Context) error { + close(i.shutdownCh) + + err := i.collectAndPushMetrics(ctx) + if err != nil { + level.Error(log.Logger).Log("msg", "collecting metrics failed at shutdown", "tenant", i.instanceID, "err", err) + } + + i.processorsMtx.Lock() + defer i.processorsMtx.Unlock() + + for processorName := range i.processors { + i.removeProcessor(processorName) + } + + return err +} diff --git a/modules/generator/instance_test.go b/modules/generator/instance_test.go new file mode 100644 index 00000000000..fb45c882558 --- /dev/null +++ b/modules/generator/instance_test.go @@ -0,0 +1,121 @@ +package generator + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/grafana/tempo/modules/generator/processor/servicegraphs" + "github.com/grafana/tempo/modules/generator/remotewrite" + "github.com/grafana/tempo/pkg/tempopb" + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/pkg/util/test" +) + +func Test_instance_concurrency(t *testing.T) { + instance, err := newInstance(&Config{}, "test", &mockOverrides{}, &remotewrite.NoopAppender{}) + assert.NoError(t, err) + + end := make(chan struct{}) + + accessor := func(f func()) { + for { + select { + case <-end: + return + default: + f() + } + } + } + + go accessor(func() { + req := test.MakeBatch(1, nil) + err := instance.pushSpans(context.Background(), &tempopb.PushSpansRequest{Batches: []*v1.ResourceSpans{req}}) + assert.NoError(t, err) + }) + + go accessor(func() { + err := instance.collectAndPushMetrics(context.Background()) + assert.NoError(t, err) + }) + + go accessor(func() { + processors := map[string]struct{}{ + "span-metrics": {}, + } + err := instance.updateProcessors(processors) + assert.NoError(t, err) + }) + + go accessor(func() { + processors := map[string]struct{}{ + "service-graphs": {}, + } + err := instance.updateProcessors(processors) + 
assert.NoError(t, err) + }) + + time.Sleep(100 * time.Millisecond) + + err = instance.shutdown(context.Background()) + assert.NoError(t, err) + + time.Sleep(10 * time.Millisecond) + close(end) + +} + +func Test_instance_updateProcessors(t *testing.T) { + instance, err := newInstance(&Config{}, "test", &mockOverrides{}, &remotewrite.NoopAppender{}) + assert.NoError(t, err) + + // shutdown the instance to stop the update goroutine + err = instance.shutdown(context.Background()) + assert.NoError(t, err) + + // no processors should be present initially + assert.Len(t, instance.processors, 0) + + t.Run("add new processor", func(t *testing.T) { + processors := map[string]struct{}{ + servicegraphs.Name: {}, + } + err := instance.updateProcessors(processors) + assert.NoError(t, err) + + assert.Len(t, instance.processors, 1) + assert.Equal(t, instance.processors[servicegraphs.Name].Name(), servicegraphs.Name) + }) + + t.Run("add unknown processor", func(t *testing.T) { + processors := map[string]struct{}{ + "span-metricsss": {}, // typo in the overrides + } + err := instance.updateProcessors(processors) + assert.Error(t, err) + + // existing processors should not be removed when adding a new processor fails + assert.Len(t, instance.processors, 1) + assert.Equal(t, instance.processors[servicegraphs.Name].Name(), servicegraphs.Name) + }) + + t.Run("remove processor", func(t *testing.T) { + err := instance.updateProcessors(nil) + assert.NoError(t, err) + + assert.Len(t, instance.processors, 0) + }) +} + +type mockOverrides struct { + processors map[string]struct{} +} + +var _ metricsGeneratorOverrides = (*mockOverrides)(nil) + +func (m *mockOverrides) MetricsGeneratorProcessors(userID string) map[string]struct{} { + return m.processors +} diff --git a/modules/generator/overrides.go b/modules/generator/overrides.go new file mode 100644 index 00000000000..741c3de2623 --- /dev/null +++ b/modules/generator/overrides.go @@ -0,0 +1,11 @@ +package generator + +import ( + "github.com/grafana/tempo/modules/overrides" +) + +type metricsGeneratorOverrides interface { + MetricsGeneratorProcessors(userID string) map[string]struct{} +} + +var _ metricsGeneratorOverrides = (*overrides.Overrides)(nil) diff --git a/modules/generator/processor/interface.go b/modules/generator/processor/interface.go new file mode 100644 index 00000000000..d56f5ad986e --- /dev/null +++ b/modules/generator/processor/interface.go @@ -0,0 +1,24 @@ +package processor + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/tempo/pkg/tempopb" +) + +type Processor interface { + // Name returns the name of the processor. + Name() string + + // RegisterMetrics registers metrics that are emitted by this processor. + RegisterMetrics(reg prometheus.Registerer) error + + // PushSpans processes a batch of spans and updates the metrics registered in RegisterMetrics. + PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) error + + // Shutdown releases any resources allocated by the processor and unregisters metrics registered + // by RegisterMetrics. Once the processor is shut down, PushSpans should not be called anymore. 
+ Shutdown(ctx context.Context, reg prometheus.Registerer) error +} diff --git a/modules/generator/processor/registry.go b/modules/generator/processor/registry.go new file mode 100644 index 00000000000..6226e1faa7d --- /dev/null +++ b/modules/generator/processor/registry.go @@ -0,0 +1,159 @@ +package processor + +import ( + "fmt" + "math" + "time" + + "github.com/prometheus/client_golang/prometheus" + prometheus_model "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/model/exemplar" + prometheus_labels "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" +) + +// Registry is a prometheus.Registerer that can gather metrics and push them directly into a +// Prometheus storage.Appender. +// +// Currently, only counters and histograms are supported. Descriptions are ignored. +type Registry struct { + prometheus.Registerer + + gatherer prometheus.Gatherer + + now func() time.Time +} + +func NewRegistry(externalLabels map[string]string) Registry { + registry := prometheus.NewRegistry() + + registerer := prometheus.WrapRegistererWith(externalLabels, registry) + + return Registry{ + Registerer: registerer, + gatherer: registry, + now: time.Now, + } +} + +func (r *Registry) Gather(appender storage.Appender) error { + metricFamilies, err := r.gatherer.Gather() + if err != nil { + return err + } + + timestamp := r.now().UnixMilli() + + for _, metricFamily := range metricFamilies { + + switch metricFamily.GetType() { + case prometheus_model.MetricType_COUNTER: + for _, metric := range metricFamily.GetMetric() { + labels := labelPairsToLabels(metric.Label) + labels = appendWithLabel(labels, "__name__", metricFamily.GetName()) + + _, err := appender.Append(0, labels, timestamp, metric.GetCounter().GetValue()) + if err != nil { + return err + } + } + + case prometheus_model.MetricType_HISTOGRAM: + for _, metric := range metricFamily.GetMetric() { + labels := labelPairsToLabels(metric.Label) + + histogram := metric.GetHistogram() + + // _count + countLabels := copyWithLabel(labels, "__name__", fmt.Sprintf("%s_count", metricFamily.GetName())) + _, err := appender.Append(0, countLabels, timestamp, float64(histogram.GetSampleCount())) + if err != nil { + return err + } + + // _sum + sumLabels := copyWithLabel(labels, "__name__", fmt.Sprintf("%s_sum", metricFamily.GetName())) + _, err = appender.Append(0, sumLabels, timestamp, histogram.GetSampleSum()) + if err != nil { + return err + } + + addedInfBucket := false + + // _bucket + bucketLabels := copyWithLabel(labels, "__name__", fmt.Sprintf("%s_bucket", metricFamily.GetName())) + for _, bucket := range histogram.GetBucket() { + + if bucket.GetUpperBound() == math.Inf(1) { + addedInfBucket = true + } + + bucketWithLeLabels := copyWithLabel(bucketLabels, "le", fmt.Sprintf("%g", bucket.GetUpperBound())) + _, err = appender.Append(0, bucketWithLeLabels, timestamp, float64(bucket.GetCumulativeCount())) + if err != nil { + return err + } + + e := bucket.GetExemplar() + if e != nil { + _, err = appender.AppendExemplar(0, bucketWithLeLabels, exemplar.Exemplar{ + Labels: labelPairsToLabels(e.GetLabel()), + Value: e.GetValue(), + Ts: e.GetTimestamp().AsTime().UnixMilli(), + HasTs: e.GetTimestamp() != nil, + }) + if err != nil { + return err + } + } + } + + if !addedInfBucket { + // _bucket, le="+Inf" + bucketInfLabels := copyWithLabel(bucketLabels, "le", "+Inf") + _, err = appender.Append(0, bucketInfLabels, timestamp, float64(histogram.GetSampleCount())) + if err != nil { + return err + } + } + } + + 
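		// Editorial note, not part of this change: only counters and histograms are
		// handled above; any other metric type falls through to the error below, so an
		// unsupported processor fails loudly instead of silently dropping samples.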
default: + return fmt.Errorf("metric type %s is not supported by Registry", metricFamily.GetType()) + } + } + + return nil +} + +// SetTimeNow is used for stubbing time.Now in testing +func (r *Registry) SetTimeNow(now func() time.Time) { + r.now = now +} + +func labelPairsToLabels(labelPairs []*prometheus_model.LabelPair) prometheus_labels.Labels { + labels := make(prometheus_labels.Labels, len(labelPairs)) + + for i, labelPair := range labelPairs { + labels[i] = prometheus_labels.Label{ + Name: labelPair.GetName(), + Value: labelPair.GetValue(), + } + } + + return labels +} + +func appendWithLabel(labels prometheus_labels.Labels, name, value string) prometheus_labels.Labels { + return append(labels, prometheus_labels.Label{ + Name: name, + Value: value, + }) +} + +func copyWithLabel(labels prometheus_labels.Labels, name, value string) prometheus_labels.Labels { + labelsCopy := make(prometheus_labels.Labels, len(labels), len(labels)+1) + copy(labelsCopy, labels) + + return appendWithLabel(labelsCopy, name, value) +} diff --git a/modules/generator/processor/registry_test.go b/modules/generator/processor/registry_test.go new file mode 100644 index 00000000000..62267993154 --- /dev/null +++ b/modules/generator/processor/registry_test.go @@ -0,0 +1,209 @@ +package processor + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/assert" + + "github.com/grafana/tempo/modules/generator/processor/util/test" +) + +func TestRegistry(t *testing.T) { + now := time.Now() + theTime := &now + + registry := NewRegistry(nil) + registry.SetTimeNow(func() time.Time { + return *theTime + }) + + // Register some Prometheus metrics + counter := prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "test", + Name: "my_counter", + Help: "This is a test counter", + }) + counterVec := prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "test", + Name: "my_counter_vec", + Help: "This is a test counter vec", + }, []string{"label1", "label2"}) + histogram := prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "test", + Name: "my_histogram", + Help: "This is a test histogram", + Buckets: prometheus.LinearBuckets(1, 1, 3), + }) + histogramVec := prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "test", + Name: "my_histogram_vec", + Help: "This is a test histogram vec", + Buckets: prometheus.ExponentialBuckets(1, 2, 3), + }, []string{"label1"}) + + registry.MustRegister(counter, counterVec, histogram, histogramVec) + + // Collect a first time + testAppender := &test.Appender{} + err := registry.Gather(testAppender) + assert.NoError(t, err) + + expectedMetrics := []test.Metric{ + {Labels: `{__name__="test_my_counter"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_count"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_sum"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_bucket", le="1"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_bucket", le="2"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_bucket", le="3"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_bucket", le="+Inf"}`, Value: 0}, + } + testAppender.ContainsAll(t, expectedMetrics, *theTime) + + *theTime = (*theTime).Add(5 * time.Second) + + // Modify the metrics + counter.Inc() + counterVec.WithLabelValues("value1", "value2").Inc() + counterVec.WithLabelValues("value1", "anotherValue2").Add(2) + histogram.Observe(2) + 
histogram.Observe(3) + histogram.Observe(4) + histogramVec.WithLabelValues("value1").Observe(1) + histogramVec.WithLabelValues("value2").Observe(2) + + // Collect a second time + testAppender = &test.Appender{} + err = registry.Gather(testAppender) + assert.NoError(t, err) + + expectedMetrics = []test.Metric{ + {Labels: `{__name__="test_my_counter"}`, Value: 1}, + {Labels: `{label1="value1", label2="value2", __name__="test_my_counter_vec"}`, Value: 1}, + {Labels: `{label1="value1", label2="anotherValue2", __name__="test_my_counter_vec"}`, Value: 2}, + {Labels: `{__name__="test_my_histogram_count"}`, Value: 3}, + {Labels: `{__name__="test_my_histogram_sum"}`, Value: 9}, + {Labels: `{__name__="test_my_histogram_bucket", le="1"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_bucket", le="2"}`, Value: 1}, + {Labels: `{__name__="test_my_histogram_bucket", le="3"}`, Value: 2}, + {Labels: `{__name__="test_my_histogram_bucket", le="+Inf"}`, Value: 3}, + {Labels: `{label1="value1", __name__="test_my_histogram_vec_count"}`, Value: 1}, + {Labels: `{label1="value1", __name__="test_my_histogram_vec_sum"}`, Value: 1}, + {Labels: `{label1="value1", __name__="test_my_histogram_vec_bucket", le="1"}`, Value: 1}, + {Labels: `{label1="value1", __name__="test_my_histogram_vec_bucket", le="2"}`, Value: 1}, + {Labels: `{label1="value1", __name__="test_my_histogram_vec_bucket", le="4"}`, Value: 1}, + {Labels: `{label1="value1", __name__="test_my_histogram_vec_bucket", le="+Inf"}`, Value: 1}, + {Labels: `{label1="value2", __name__="test_my_histogram_vec_count"}`, Value: 1}, + {Labels: `{label1="value2", __name__="test_my_histogram_vec_sum"}`, Value: 2}, + {Labels: `{label1="value2", __name__="test_my_histogram_vec_bucket", le="1"}`, Value: 0}, + {Labels: `{label1="value2", __name__="test_my_histogram_vec_bucket", le="2"}`, Value: 1}, + {Labels: `{label1="value2", __name__="test_my_histogram_vec_bucket", le="4"}`, Value: 1}, + {Labels: `{label1="value2", __name__="test_my_histogram_vec_bucket", le="+Inf"}`, Value: 1}, + } + testAppender.ContainsAll(t, expectedMetrics, *theTime) +} + +func TestRegistry_exemplars(t *testing.T) { + now := time.Now() + theTime := &now + + registry := NewRegistry(nil) + registry.SetTimeNow(func() time.Time { + return *theTime + }) + + // Register a Prometheus histogram + histogram := prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "test", + Name: "my_histogram", + Help: "This is a test histogram", + Buckets: prometheus.LinearBuckets(1, 1, 3), + }) + + registry.MustRegister(histogram) + + // Observe some values with exemplars + histogram.(prometheus.ExemplarObserver).ObserveWithExemplar( + 2, prometheus.Labels{"traceID": "1112"}, + ) + histogram.(prometheus.ExemplarObserver).ObserveWithExemplar( + 3, prometheus.Labels{"traceID": "1113"}, + ) + histogram.(prometheus.ExemplarObserver).ObserveWithExemplar( + 4, prometheus.Labels{"traceID": "1114"}, + ) + + // Collect metrics + testAppender := &test.Appender{} + err := registry.Gather(testAppender) + assert.NoError(t, err) + + expectedMetrics := []test.Metric{ + {Labels: `{__name__="test_my_histogram_count"}`, Value: 3}, + {Labels: `{__name__="test_my_histogram_sum"}`, Value: 9}, + {Labels: `{__name__="test_my_histogram_bucket", le="1"}`, Value: 0}, + {Labels: `{__name__="test_my_histogram_bucket", le="2"}`, Value: 1}, + {Labels: `{__name__="test_my_histogram_bucket", le="3"}`, Value: 2}, + {Labels: `{__name__="test_my_histogram_bucket", le="+Inf"}`, Value: 3}, + } + testAppender.ContainsAll(t, expectedMetrics, 
*theTime) + + expectedLabels := []string{ + `{__name__="test_my_histogram_bucket", le="2"}`, + `{__name__="test_my_histogram_bucket", le="3"}`, + `{__name__="test_my_histogram_bucket", le="+Inf"}`, + } + expectedExemplars := []exemplar.Exemplar{ + {Labels: []labels.Label{{Name: "traceID", Value: "1112"}}, Value: 2, Ts: theTime.UnixMilli(), HasTs: true}, + {Labels: []labels.Label{{Name: "traceID", Value: "1113"}}, Value: 3, Ts: theTime.UnixMilli(), HasTs: true}, + {Labels: []labels.Label{{Name: "traceID", Value: "1114"}}, Value: 4, Ts: theTime.UnixMilli(), HasTs: true}, + } + testAppender.ContainsAllExemplars(t, expectedLabels, expectedExemplars) +} + +func TestRegisterer_externalLabels(t *testing.T) { + now := time.Now() + theTime := &now + + registry := NewRegistry(map[string]string{ + "external_label1": "constant_value1", + "external_label2": "constant_value2", + }) + registry.SetTimeNow(func() time.Time { + return *theTime + }) + + // Register some Prometheus metrics + counter := prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "test", + Name: "my_counter", + Help: "This is a test counter", + }) + histogram := prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "test", + Name: "my_histogram", + Help: "This is a test histogram", + Buckets: prometheus.LinearBuckets(1, 1, 3), + }) + + registry.MustRegister(counter, histogram) + + // Collect the metrics + testAppender := &test.Appender{} + err := registry.Gather(testAppender) + assert.NoError(t, err) + + expectedMetrics := []test.Metric{ + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_counter"}`, Value: 0}, + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_histogram_count"}`, Value: 0}, + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_histogram_sum"}`, Value: 0}, + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_histogram_bucket", le="1"}`, Value: 0}, + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_histogram_bucket", le="2"}`, Value: 0}, + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_histogram_bucket", le="3"}`, Value: 0}, + {Labels: `{external_label1="constant_value1", external_label2="constant_value2", __name__="test_my_histogram_bucket", le="+Inf"}`, Value: 0}, + } + testAppender.ContainsAll(t, expectedMetrics, *theTime) +} diff --git a/modules/generator/processor/servicegraphs/config.go b/modules/generator/processor/servicegraphs/config.go new file mode 100644 index 00000000000..13343cc7528 --- /dev/null +++ b/modules/generator/processor/servicegraphs/config.go @@ -0,0 +1,35 @@ +package servicegraphs + +import ( + "flag" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + Name = "service-graphs" +) + +type Config struct { + // Wait is the value to wait for an edge to be completed + Wait time.Duration `mapstructure:"wait"` + // MaxItems is the amount of edges that will be stored in the storeMap + MaxItems int `mapstructure:"max_items"` + + // Workers is the amount of workers that will be used to process the edges + Workers int `mapstructure:"workers"` + + // Buckets for latency histogram in seconds. 
+ HistogramBuckets []float64 `yaml:"histogram_buckets"` + + // SuccessCodes *successCodes `mapstructure:"success_codes"` +} + +func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { + cfg.Wait = 10 * time.Second + cfg.MaxItems = 10_000 + cfg.Workers = 10 + // TODO: Revisit this default value. + cfg.HistogramBuckets = prometheus.ExponentialBuckets(0.1, 2, 8) +} diff --git a/modules/generator/processor/servicegraphs/servicegraphs.go b/modules/generator/processor/servicegraphs/servicegraphs.go new file mode 100644 index 00000000000..e50beb7069a --- /dev/null +++ b/modules/generator/processor/servicegraphs/servicegraphs.go @@ -0,0 +1,277 @@ +package servicegraphs + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "time" + + "github.com/go-kit/log/level" + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + gen "github.com/grafana/tempo/modules/generator/processor" + "github.com/grafana/tempo/modules/generator/processor/servicegraphs/store" + "github.com/grafana/tempo/modules/generator/processor/util" + "github.com/grafana/tempo/pkg/tempopb" + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/pkg/util/log" +) + +var ( + metricDroppedSpans = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_processor_service_graphs_dropped_spans", + Help: "Number of dropped spans.", + }, []string{"tenant"}) + metricUnpairedEdges = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_processor_service_graphs_unpaired_edges", + Help: "Number of expired edges (client or server).", + }, []string{"tenant"}) +) + +type tooManySpansError struct { + droppedSpans int +} + +func (t tooManySpansError) Error() string { + return fmt.Sprintf("dropped %d spans", t.droppedSpans) +} + +type processor struct { + cfg Config + + store store.Store + + // completed edges are pushed through this channel to be processed. + collectCh chan string + closeCh chan struct{} + + serviceGraphRequestTotal *prometheus.CounterVec + serviceGraphRequestFailedTotal *prometheus.CounterVec + serviceGraphRequestServerHistogram *prometheus.HistogramVec + serviceGraphRequestClientHistogram *prometheus.HistogramVec + serviceGraphUnpairedSpansTotal *prometheus.CounterVec + serviceGraphDroppedSpansTotal *prometheus.CounterVec + + metricDroppedSpans prometheus.Counter + metricUnpairedEdges prometheus.Counter +} + +func New(cfg Config, tenant string) gen.Processor { + p := &processor{ + cfg: cfg, + + collectCh: make(chan string, cfg.MaxItems), + closeCh: make(chan struct{}, 1), + + // TODO we only have to pass tenant to be used in instrumentation, can we avoid doing this somehow? 
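	// Editorial note, not part of this change: the two assignments below curry the
	// counters with WithLabelValues(tenant) once at construction, so the hot path can
	// call Inc() without a per-span label lookup.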
+ metricDroppedSpans: metricDroppedSpans.WithLabelValues(tenant), + metricUnpairedEdges: metricUnpairedEdges.WithLabelValues(tenant), + } + + p.store = store.NewStore(cfg.Wait, cfg.MaxItems, p.collectEdge) + + expirationTicker := time.NewTicker(2 * time.Second) + for i := 0; i < cfg.Workers; i++ { + go func() { + for { + select { + case k := <-p.collectCh: + p.store.EvictEdge(k) + + // Periodically cleans expired edges from the store + case <-expirationTicker.C: + p.store.Expire() + + case <-p.closeCh: + return + } + } + }() + } + + return p +} + +func (p *processor) Name() string { return Name } + +func (p *processor) RegisterMetrics(reg prometheus.Registerer) error { + p.serviceGraphRequestTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "traces", + Name: "service_graph_request_total", + Help: "Total count of requests between two nodes", + }, []string{"client", "server"}) + p.serviceGraphRequestFailedTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "traces", + Name: "service_graph_request_failed_total", + Help: "Total count of failed requests between two nodes", + }, []string{"client", "server"}) + p.serviceGraphRequestServerHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "traces", + Name: "service_graph_request_server_seconds", + Help: "Time for a request between two nodes as seen from the server", + Buckets: p.cfg.HistogramBuckets, + }, []string{"client", "server"}) + p.serviceGraphRequestClientHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "traces", + Name: "service_graph_request_client_seconds", + Help: "Time for a request between two nodes as seen from the client", + Buckets: p.cfg.HistogramBuckets, + }, []string{"client", "server"}) + p.serviceGraphUnpairedSpansTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "traces", + Name: "service_graph_unpaired_spans_total", + Help: "Total count of unpaired spans", + }, []string{"client", "server"}) + p.serviceGraphDroppedSpansTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "traces", + Name: "service_graph_dropped_spans_total", + Help: "Total count of dropped spans", + }, []string{"client", "server"}) + + cs := []prometheus.Collector{ + p.serviceGraphRequestTotal, + p.serviceGraphRequestFailedTotal, + p.serviceGraphRequestServerHistogram, + p.serviceGraphRequestClientHistogram, + p.serviceGraphUnpairedSpansTotal, + p.serviceGraphDroppedSpansTotal, + } + + for _, c := range cs { + if err := reg.Register(c); err != nil { + return err + } + } + + return nil +} + +func (p *processor) unregisterMetrics(reg prometheus.Registerer) { + cs := []prometheus.Collector{ + p.serviceGraphRequestTotal, + p.serviceGraphRequestFailedTotal, + p.serviceGraphRequestServerHistogram, + p.serviceGraphRequestClientHistogram, + p.serviceGraphUnpairedSpansTotal, + p.serviceGraphDroppedSpansTotal, + } + + for _, c := range cs { + reg.Unregister(c) + } +} + +func (p *processor) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) error { + span, _ := opentracing.StartSpanFromContext(ctx, "servicegraphs.PushSpans") + defer span.Finish() + + if err := p.consume(req.Batches); err != nil { + if errors.As(err, &tooManySpansError{}) { + level.Warn(log.Logger).Log("msg", "skipped processing of spans", "maxItems", p.cfg.MaxItems, "err", err) + } else { + level.Error(log.Logger).Log("msg", "failed consuming traces", "err", err) + } + } + + return nil +} + +func (p *processor) consume(resourceSpans []*v1.ResourceSpans) error { + 
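	// Editorial note, not part of this change: edges are keyed so both halves of one
	// request meet in the store: a client span uses (trace ID, its own span ID), while a
	// server span uses (trace ID, its parent span ID), which is the client span's ID.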
	var totalDroppedSpans int
+
+	for _, rs := range resourceSpans {
+		svcName := util.GetServiceName(rs.Resource)
+		if svcName == "" {
+			continue
+		}
+
+		for _, ils := range rs.InstrumentationLibrarySpans {
+			var (
+				edge *store.Edge
+				k    string
+				err  error
+			)
+			for _, span := range ils.Spans {
+				switch span.Kind {
+				case v1.Span_SPAN_KIND_CLIENT:
+					k = key(hex.EncodeToString(span.TraceId), hex.EncodeToString(span.SpanId))
+					edge, err = p.store.UpsertEdge(k, func(e *store.Edge) {
+						e.ClientService = svcName
+						e.ClientLatencySec = spanDurationSec(span)
+						e.Failed = e.Failed || p.spanFailed(span)
+					})
+				case v1.Span_SPAN_KIND_SERVER:
+					k = key(hex.EncodeToString(span.TraceId), hex.EncodeToString(span.ParentSpanId))
+					edge, err = p.store.UpsertEdge(k, func(e *store.Edge) {
+						e.ServerService = svcName
+						e.ServerLatencySec = spanDurationSec(span)
+						e.Failed = e.Failed || p.spanFailed(span)
+					})
+				default:
+					continue
+				}
+
+				if errors.Is(err, store.ErrTooManyItems) {
+					totalDroppedSpans++
+					p.metricDroppedSpans.Inc()
+					continue
+				}
+
+				// UpsertEdge only returns ErrTooManyItems, which is handled above; any
+				// other error is unexpected.
+				if err != nil {
+					return err
+				}
+
+				if edge.IsCompleted() {
+					p.collectCh <- k
+				}
+			}
+		}
+	}
+
+	if totalDroppedSpans > 0 {
+		return &tooManySpansError{
+			droppedSpans: totalDroppedSpans,
+		}
+	}
+
+	return nil
+}
+
+func (p *processor) Shutdown(ctx context.Context, reg prometheus.Registerer) error {
+	close(p.closeCh)
+	p.unregisterMetrics(reg)
+	return nil
+}
+
+// collectEdge records the metrics for the given edge. It is called by the store when an
+// edge is completed or has expired and is about to be deleted.
+func (p *processor) collectEdge(e *store.Edge) {
+	if e.IsCompleted() {
+		p.serviceGraphRequestTotal.WithLabelValues(e.ClientService, e.ServerService).Inc()
+		if e.Failed {
+			p.serviceGraphRequestFailedTotal.WithLabelValues(e.ClientService, e.ServerService).Inc()
+		}
+		p.serviceGraphRequestServerHistogram.WithLabelValues(e.ClientService, e.ServerService).Observe(e.ServerLatencySec)
+		p.serviceGraphRequestClientHistogram.WithLabelValues(e.ClientService, e.ServerService).Observe(e.ClientLatencySec)
+	} else if e.IsExpired() {
+		p.metricUnpairedEdges.Inc()
+	}
+}
+
+func (p *processor) spanFailed(span *v1.Span) bool {
+	return false
+}
+
+func spanDurationSec(span *v1.Span) float64 {
+	return float64(span.EndTimeUnixNano-span.StartTimeUnixNano) / float64(time.Second.Nanoseconds())
+}
+
+func key(k1, k2 string) string {
+	return fmt.Sprintf("%s-%s", k1, k2)
+}
diff --git a/modules/generator/processor/servicegraphs/servicegraphs_test.go b/modules/generator/processor/servicegraphs/servicegraphs_test.go
new file mode 100644
index 00000000000..aa137222dab
--- /dev/null
+++ b/modules/generator/processor/servicegraphs/servicegraphs_test.go
@@ -0,0 +1,109 @@
+package servicegraphs
+
+import (
+	"context"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/gogo/protobuf/jsonpb"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	gen "github.com/grafana/tempo/modules/generator/processor"
+	test_util "github.com/grafana/tempo/modules/generator/processor/util/test"
+	"github.com/grafana/tempo/pkg/tempopb"
+)
+
+func TestServiceGraphs(t *testing.T) {
+	cfg := Config{}
+	cfg.RegisterFlagsAndApplyDefaults("", nil)
+	p := New(cfg, "test")
+
+	registry := gen.NewRegistry(nil)
+	err := p.RegisterMetrics(registry)
+	assert.NoError(t, err)
+
+	now := time.Now()
+	registry.SetTimeNow(func() time.Time {
+		return now
+	})
+
+	traces := testData(t, "testdata/test-sample.json")
+	err = p.PushSpans(context.Background(), 
&tempopb.PushSpansRequest{Batches: traces.Batches}) + assert.NoError(t, err) + + // Manually call expire to force collection of edges. + sgp := p.(*processor) + sgp.store.Expire() + + appender := &test_util.Appender{} + + collectTime := now + err = registry.Gather(appender) + assert.NoError(t, err) + + assert.False(t, appender.IsCommitted) + assert.False(t, appender.IsRolledback) + + expectedMetrics := []test_util.Metric{ + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="0.1"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="0.2"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="0.4"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="0.8"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="1.6"}`, Value: 2}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="3.2"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="6.4"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="12.8"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_bucket", le="+Inf"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_count"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_client_seconds_sum"}`, Value: 4.4}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="0.1"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="0.2"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="0.4"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="0.8"}`, Value: 0}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="1.6"}`, Value: 2}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="3.2"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="6.4"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="12.8"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_bucket", le="+Inf"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_count"}`, Value: 3}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_server_seconds_sum"}`, Value: 5}, + {Labels: `{client="app", server="db", __name__="traces_service_graph_request_total"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="0.1"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="0.2"}`, Value: 0}, + {Labels: `{client="lb", server="app", 
__name__="traces_service_graph_request_client_seconds_bucket", le="0.4"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="0.8"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="1.6"}`, Value: 1}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="3.2"}`, Value: 2}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="6.4"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="12.8"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_bucket", le="+Inf"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_count"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_client_seconds_sum"}`, Value: 7.8}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="0.1"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="0.2"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="0.4"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="0.8"}`, Value: 0}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="1.6"}`, Value: 1}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="3.2"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="6.4"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="12.8"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_bucket", le="+Inf"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_count"}`, Value: 3}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_server_seconds_sum"}`, Value: 6.2}, + {Labels: `{client="lb", server="app", __name__="traces_service_graph_request_total"}`, Value: 3}, + } + appender.ContainsAll(t, expectedMetrics, collectTime) +} + +func testData(t *testing.T, path string) *tempopb.Trace { + f, err := os.Open(path) + require.NoError(t, err) + + trace := &tempopb.Trace{} + err = jsonpb.Unmarshal(f, trace) + require.NoError(t, err) + + return trace +} diff --git a/modules/generator/processor/servicegraphs/store/edge.go b/modules/generator/processor/servicegraphs/store/edge.go new file mode 100644 index 00000000000..d9c0977ce3e --- /dev/null +++ b/modules/generator/processor/servicegraphs/store/edge.go @@ -0,0 +1,36 @@ +package store + +import "time" + +// Edge is an Edge between two nodes in the graph +type Edge struct { + key string + + ServerService, ClientService string + ServerLatencySec, ClientLatencySec float64 + + // If either the client or the server spans have status code error, + // the Edge will be considered as failed. 
+ Failed bool + + // expiration is the time at which the Edge expires, expressed as Unix time + expiration int64 +} + +func NewEdge(key string, ttl time.Duration) *Edge { + return &Edge{ + key: key, + + expiration: time.Now().Add(ttl).Unix(), + } +} + +// IsCompleted returns true if the corresponding client and server +// pair spans have been processed for the given Edge +func (e *Edge) IsCompleted() bool { + return len(e.ClientService) != 0 && len(e.ServerService) != 0 +} + +func (e *Edge) IsExpired() bool { + return time.Now().Unix() >= e.expiration +} diff --git a/modules/generator/processor/servicegraphs/store/interface.go b/modules/generator/processor/servicegraphs/store/interface.go new file mode 100644 index 00000000000..c0b10df4d37 --- /dev/null +++ b/modules/generator/processor/servicegraphs/store/interface.go @@ -0,0 +1,13 @@ +package store + +type Callback func(e *Edge) + +// Store is an interface for building service graphs. +type Store interface { + // UpsertEdge inserts or updates an edge. + UpsertEdge(key string, cb Callback) (*Edge, error) + // EvictEdge removes an edge from the store. + EvictEdge(key string) + // Expire evicts all expired edges from the store. + Expire() +} diff --git a/modules/generator/processor/servicegraphs/store/store.go b/modules/generator/processor/servicegraphs/store/store.go new file mode 100644 index 00000000000..4157bb10755 --- /dev/null +++ b/modules/generator/processor/servicegraphs/store/store.go @@ -0,0 +1,134 @@ +package store + +import ( + "container/list" + "errors" + "sync" + "time" +) + +var ( + ErrTooManyItems = errors.New("too many items") +) + +var _ Store = (*store)(nil) + +type store struct { + l *list.List + mtx *sync.RWMutex + m map[string]*list.Element + + evictCallback Callback + ttl time.Duration + maxItems int +} + +func NewStore(ttl time.Duration, maxItems int, evictCallback Callback) Store { + s := &store{ + l: list.New(), + mtx: &sync.RWMutex{}, + m: make(map[string]*list.Element), + + evictCallback: evictCallback, + ttl: ttl, + maxItems: maxItems, + } + + return s +} + +func (s *store) len() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.l.Len() +} + +// shouldEvictHead checks if the oldest item (head of list) has expired and should be evicted. +// Returns true if the item has expired or is completed, false otherwise. +// +// Must be called under lock. +func (s *store) shouldEvictHead() bool { + h := s.l.Front() + if h == nil { + return false + } + edge := h.Value.(*Edge) + return edge.IsCompleted() || edge.IsExpired() +} + +// evictHead removes the head from the store (and map). +// It also collects metrics for the evicted Edge. +// +// Must be called under lock. +func (s *store) evictHead() { + front := s.l.Front().Value.(*Edge) + s.evictEdge(front.key) +} + +// EvictEdge evicts and Edge under lock +func (s *store) EvictEdge(key string) { + s.mtx.Lock() + defer s.mtx.Unlock() + + s.evictEdge(key) +} + +// evictEdge removes the Edge from the store (and map). +// It also collects metrics for the evicted Edge. +// +// Must be called under lock. +func (s *store) evictEdge(key string) { + ele := s.m[key] + if ele == nil { // it may already have been processed + return + } + + edge := ele.Value.(*Edge) + s.evictCallback(edge) + + delete(s.m, key) + s.l.Remove(ele) +} + +// UpsertEdge fetches an Edge from the store. +// If the Edge doesn't exist, it creates a new one with the default TTL. 
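+//
+// For example, the two sides of one request can be merged into a single Edge
+// like this (illustrative sketch, assuming a store s and a key k built from
+// the trace ID and client span ID):
+//
+//	s.UpsertEdge(k, func(e *Edge) { e.ClientService = "lb" })
+//	s.UpsertEdge(k, func(e *Edge) { e.ServerService = "app" })
+//	// the Edge is now completed and will be collected on eviction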
+func (s *store) UpsertEdge(k string, cb Callback) (*Edge, error) { + s.mtx.Lock() + defer s.mtx.Unlock() + + if storedEdge, ok := s.m[k]; ok { + edge := storedEdge.Value.(*Edge) + cb(edge) + return edge, nil + } + + if s.l.Len() >= s.maxItems { + // todo: try to evict expired items + return nil, ErrTooManyItems + } + + newEdge := NewEdge(k, s.ttl) + ele := s.l.PushBack(newEdge) + s.m[k] = ele + cb(newEdge) + + return newEdge, nil +} + +// Expire evicts all expired items in the store. +func (s *store) Expire() { + s.mtx.RLock() + if !s.shouldEvictHead() { + s.mtx.RUnlock() + return + } + s.mtx.RUnlock() + + s.mtx.Lock() + defer s.mtx.Unlock() + + for s.shouldEvictHead() { + s.evictHead() + } +} diff --git a/modules/generator/processor/servicegraphs/store/store_test.go b/modules/generator/processor/servicegraphs/store/store_test.go new file mode 100644 index 00000000000..da4a24998de --- /dev/null +++ b/modules/generator/processor/servicegraphs/store/store_test.go @@ -0,0 +1,86 @@ +package store + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var noopUpsertCb Callback = func(e *Edge) {} + +func TestStore_UpsertEdge(t *testing.T) { + const keyStr = "key" + + var cbCallCount int + storeInterface := NewStore(time.Hour, 1, func(e *Edge) { + cbCallCount++ + }) + s := storeInterface.(*store) + assert.Equal(t, 0, s.len()) + + _, err := s.UpsertEdge(keyStr, func(e *Edge) {}) + require.NoError(t, err) + assert.Equal(t, 1, s.len()) + assert.False(t, s.shouldEvictHead()) // ttl is set to 1h + assert.Equal(t, 0, cbCallCount) + + e := getEdge(s, keyStr) + assert.NotNil(t, e) + assert.Equal(t, keyStr, e.key) + + _, err = s.UpsertEdge(keyStr+keyStr, func(e *Edge) {}) + assert.Error(t, err) + + _, err = s.UpsertEdge(keyStr, func(e *Edge) { + e.ClientService = "client" + e.ServerService = "server" + e.expiration = 0 // Expire immediately + }) + require.NoError(t, err) + assert.Equal(t, 0, cbCallCount) + + e = getEdge(s, keyStr) + assert.NotNil(t, e) + assert.Equal(t, "client", e.ClientService) + assert.Equal(t, "server", e.ServerService) + assert.True(t, s.shouldEvictHead()) + + s.evictHead() + assert.Equal(t, 0, s.len()) + assert.Equal(t, 1, cbCallCount) +} + +func TestStore_expire(t *testing.T) { + keys := map[string]bool{} + for i := 0; i < 100; i++ { + keys[fmt.Sprintf("key-%d", i)] = true + } + + // all new keys are immediately expired. 
+	storeInterface := NewStore(-time.Second, 100, func(e *Edge) {
+		assert.True(t, keys[e.key])
+	})
+	s := storeInterface.(*store)
+
+	for key := range keys {
+		_, err := s.UpsertEdge(key, noopUpsertCb)
+		require.NoError(t, err)
+	}
+
+	s.Expire()
+	assert.Equal(t, 0, s.len())
+}
+
+// TODO add test for maxItems
+// TODO add test to verify concurrency
+
+func getEdge(s *store, k string) *Edge {
+	ele, ok := s.m[k]
+	if !ok {
+		return nil
+	}
+	return ele.Value.(*Edge)
+}
diff --git a/modules/generator/processor/servicegraphs/testdata/test-sample.json b/modules/generator/processor/servicegraphs/testdata/test-sample.json
new file mode 100644
index 00000000000..352d203111c
--- /dev/null
+++ b/modules/generator/processor/servicegraphs/testdata/test-sample.json
@@ -0,0 +1,1916 @@
+{
+  "batches":[
+    {
+      "resource":{
+        "attributes":[
+          {
+            "key":"service.name",
+            "value":{
+              "stringValue":"lb"
+            }
+          },
+          {
+            "key":"cluster",
+            "value":{
+              "stringValue":"tns-demo"
+            }
+          },
+          {
+            "key":"namespace",
+            "value":{
+              "stringValue":"tns-demo"
+            }
+          },
+          {
+            "key":"opencensus.exporterversion",
+            "value":{
+              "stringValue":"Jaeger-Go-2.22.1"
+            }
+          },
+          {
+            "key":"host.name",
+            "value":{
+              "stringValue":"loadgen-6b59dff4c-jdkdk"
+            }
+          },
+          {
+            "key":"ip",
+            "value":{
+              "stringValue":"10.136.11.153"
+            }
+          },
+          {
+            "key":"client-uuid",
+            "value":{
+              "stringValue":"653fad9a76c115ac"
+            }
+          },
+          {
+            "key":"container",
+            "value":{
+              "stringValue":"loadgen"
+            }
+          },
+          {
+            "key":"pod",
+            "value":{
+              "stringValue":"loadgen-6b59dff4c-jdkdk"
+            }
+          }
+        ]
+      },
+      "instrumentationLibrarySpans":[
+        {
+          "instrumentationLibrary":{
+
+          },
+          "spans":[
+            {
+              "traceId":"AAAAAAAAAABbdZuWJO/uiw==",
+              "spanId":"W3WbliTv7os=",
+              "name":"HTTP Client",
+              "startTimeUnixNano":"1626717505784699000",
+              "endTimeUnixNano":"1626717505833874000",
+              "attributes":[
+                {
+                  "key":"sampler.type",
+                  "value":{
+                    "stringValue":"const"
+                  }
+                },
+                {
+                  "key":"sampler.param",
+                  "value":{
+                    "boolValue":true
+                  }
+                }
+              ],
+              "status":{
+
+              }
+            },
+            {
+              "traceId":"AAAAAAAAAABbdZuWJO/uiw==",
+              "spanId":"d5ZXpoG4mUc=",
+              "parentSpanId":"W3WbliTv7os=",
+              "name":"HTTP POST",
+              "kind":"SPAN_KIND_CLIENT",
+              "startTimeUnixNano":"1626717503329568000",
+              "endTimeUnixNano":"1626717505829568000",
+              "attributes":[
+                {
+                  "key":"http.status_code",
+                  "value":{
+                    "intValue":"302"
+                  }
+                },
+                {
+                  "key":"component",
+                  "value":{
+                    "stringValue":"net/http"
+                  }
+                },
+                {
+                  "key":"http.method",
+                  "value":{
+                    "stringValue":"POST"
+                  }
+                },
+                {
+                  "key":"http.url",
+                  "value":{
+                    "stringValue":"app:80"
+                  }
+                },
+                {
+                  "key":"net/http.reused",
+                  "value":{
+                    "boolValue":false
+                  }
+                },
+                {
+                  "key":"net/http.was_idle",
+                  "value":{
+                    "boolValue":false
+                  }
+                }
+              ],
+              "events":[
+                {
+                  "timeUnixNano":"1626717505784725000",
+                  "attributes":[
+                    {
+                      "key":"event",
+                      "value":{
+                        "stringValue":"GetConn"
+                      }
+                    }
+                  ]
+                },
+                {
+                  "timeUnixNano":"1626717505784771000",
+                  "attributes":[
+                    {
+                      "key":"event",
+                      "value":{
+                        "stringValue":"DNSStart"
+                      }
+                    },
+                    {
+                      "key":"host",
+                      "value":{
+                        "stringValue":"app"
+                      }
+                    }
+                  ]
+                },
+                {
+                  "timeUnixNano":"1626717505822812000",
+                  "attributes":[
+                    {
+                      "key":"event",
+                      "value":{
+                        "stringValue":"DNSDone"
+                      }
+                    },
+                    {
+                      "key":"addr",
+                      "value":{
+                        "stringValue":"10.188.116.196"
+                      }
+                    }
+                  ]
+                },
+                {
+                  "timeUnixNano":"1626717505822821000",
+                  "attributes":[
+                    {
+                      "key":"event",
+                      "value":{
+                        "stringValue":"ConnectStart"
+                      }
+                    },
+                    {
+                      "key":"network",
+                      "value":{
+                        "stringValue":"tcp"
+                      }
+                    },
+                    {
+                      "key":"addr",
+                      "value":{
+                        "stringValue":"10.188.116.196:80"
+                      }
+                    }
+                  ]
+                },
+                
{ + "timeUnixNano":"1626717505822983000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectDone" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505823035000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505823116000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteHeaders" + } + } + ] + }, + { + "timeUnixNano":"1626717505823121000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteRequest" + } + } + ] + }, + { + "timeUnixNano":"1626717505829460000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotFirstResponseByte" + } + } + ] + }, + { + "timeUnixNano":"1626717505829568000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ClosedBody" + } + } + ] + } + ], + "status":{ + "deprecatedCode":"DEPRECATED_STATUS_CODE_UNKNOWN_ERROR", + "code":"STATUS_CODE_ERROR" + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"T3b9lSy4e6o=", + "parentSpanId":"W3WbliTv7os=", + "name":"HTTP GET", + "kind":"SPAN_KIND_CLIENT", + "startTimeUnixNano":"1626717504533933000", + "endTimeUnixNano":"1626717505833933000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"404" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"app:80" + } + }, + { + "key":"net/http.reused", + "value":{ + "boolValue":false + } + }, + { + "key":"net/http.was_idle", + "value":{ + "boolValue":false + } + } + ], + "events":[ + { + "timeUnixNano":"1626717505829603000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GetConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505829642000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSStart" + } + }, + { + "key":"host", + "value":{ + "stringValue":"app" + } + } + ] + }, + { + "timeUnixNano":"1626717505830180000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSDone" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196" + } + } + ] + }, + { + "timeUnixNano":"1626717505830186000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectStart" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505830301000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectDone" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505830332000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505830372000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteHeaders" + } + } + ] + }, + { + "timeUnixNano":"1626717505830373000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteRequest" + } + } + ] + }, + { + "timeUnixNano":"1626717505833806000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotFirstResponseByte" + } + } + ] + }, + { + "timeUnixNano":"1626717505833933000", + 
"attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ClosedBody" + } + } + ] + } + ], + "status":{ + + } + } + ] + } + ] + }, + { + "resource":{ + "attributes":[ + { + "key":"service.name", + "value":{ + "stringValue":"app" + } + }, + { + "key":"cluster", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"namespace", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"opencensus.exporterversion", + "value":{ + "stringValue":"Jaeger-Go-2.22.1" + } + }, + { + "key":"host.name", + "value":{ + "stringValue":"app-7c474df6bc-xpm6j" + } + }, + { + "key":"ip", + "value":{ + "stringValue":"10.136.11.151" + } + }, + { + "key":"client-uuid", + "value":{ + "stringValue":"264ce1d77c354156" + } + }, + { + "key":"container", + "value":{ + "stringValue":"app" + } + }, + { + "key":"pod", + "value":{ + "stringValue":"app-7c474df6bc-xpm6j" + } + } + ] + }, + "instrumentationLibrarySpans":[ + { + "instrumentationLibrary":{ + + }, + "spans":[ + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"CyLVKnG8lfk=", + "parentSpanId":"d5ZXpoG4mUc=", + "name":"HTTP POST - post", + "kind":"SPAN_KIND_SERVER", + "startTimeUnixNano":"1626717504829303000", + "endTimeUnixNano":"1626717505829303000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"302" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"POST" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"/post" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + } + ], + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"bzp4d/duh20=", + "parentSpanId":"CyLVKnG8lfk=", + "name":"HTTP Client", + "startTimeUnixNano":"1626717505823375000", + "endTimeUnixNano":"1626717505829164000", + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"T/rh1/wSL9g=", + "parentSpanId":"bzp4d/duh20=", + "name":"HTTP POST", + "kind":"SPAN_KIND_CLIENT", + "startTimeUnixNano":"1626717504929264000", + "endTimeUnixNano":"1626717505829264000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"208" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"POST" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"db:80" + } + }, + { + "key":"net/http.reused", + "value":{ + "boolValue":false + } + }, + { + "key":"net/http.was_idle", + "value":{ + "boolValue":false + } + } + ], + "events":[ + { + "timeUnixNano":"1626717505823393000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GetConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505823439000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSStart" + } + }, + { + "key":"host", + "value":{ + "stringValue":"db" + } + } + ] + }, + { + "timeUnixNano":"1626717505827663000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSDone" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8" + } + } + ] + }, + { + "timeUnixNano":"1626717505827673000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectStart" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505827797000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectDone" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ 
+ "stringValue":"10.188.106.8:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505827824000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505827896000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteHeaders" + } + } + ] + }, + { + "timeUnixNano":"1626717505827901000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteRequest" + } + } + ] + }, + { + "timeUnixNano":"1626717505829057000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotFirstResponseByte" + } + } + ] + }, + { + "timeUnixNano":"1626717505829264000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ClosedBody" + } + } + ] + } + ], + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"NvbVfAxyM10=", + "parentSpanId":"T3b9lSy4e6o=", + "name":"HTTP GET - root", + "kind":"SPAN_KIND_SERVER", + "startTimeUnixNano":"1626717503833939000", + "endTimeUnixNano":"1626717505833939000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"/" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + } + ], + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"M464LUSGHVU=", + "parentSpanId":"NvbVfAxyM10=", + "name":"HTTP Client", + "startTimeUnixNano":"1626717505830578000", + "endTimeUnixNano":"1626717505833516000", + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"FcfJmhEwNxM=", + "parentSpanId":"M464LUSGHVU=", + "name":"HTTP GET", + "kind":"SPAN_KIND_CLIENT", + "startTimeUnixNano":"1626717504833879000", + "endTimeUnixNano":"1626717505833879000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"db:80" + } + }, + { + "key":"net/http.reused", + "value":{ + "boolValue":false + } + }, + { + "key":"net/http.was_idle", + "value":{ + "boolValue":false + } + } + ], + "events":[ + { + "timeUnixNano":"1626717505830596000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GetConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505830647000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSStart" + } + }, + { + "key":"host", + "value":{ + "stringValue":"db" + } + } + ] + }, + { + "timeUnixNano":"1626717505832975000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSDone" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8" + } + } + ] + }, + { + "timeUnixNano":"1626717505832983000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectStart" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505833096000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectDone" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505833116000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotConn" + } + } + ] + 
}, + { + "timeUnixNano":"1626717505833153000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteHeaders" + } + } + ] + }, + { + "timeUnixNano":"1626717505833154000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteRequest" + } + } + ] + }, + { + "timeUnixNano":"1626717505833460000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotFirstResponseByte" + } + } + ] + }, + { + "timeUnixNano":"1626717505833880000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ClosedBody" + } + } + ] + } + ], + "status":{ + + } + } + ] + } + ] + }, + { + "resource":{ + "attributes":[ + { + "key":"service.name", + "value":{ + "stringValue":"db" + } + }, + { + "key":"cluster", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"namespace", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"opencensus.exporterversion", + "value":{ + "stringValue":"Jaeger-Go-2.22.1" + } + }, + { + "key":"host.name", + "value":{ + "stringValue":"db-7488656cb4-m8ljw" + } + }, + { + "key":"ip", + "value":{ + "stringValue":"10.136.11.152" + } + }, + { + "key":"client-uuid", + "value":{ + "stringValue":"392a5faabe967ba3" + } + }, + { + "key":"container", + "value":{ + "stringValue":"db" + } + }, + { + "key":"pod", + "value":{ + "stringValue":"db-7488656cb4-m8ljw" + } + } + ] + }, + "instrumentationLibrarySpans":[ + { + "instrumentationLibrary":{ + + }, + "spans":[ + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"IElW8xeWvqs=", + "parentSpanId":"T/rh1/wSL9g=", + "name":"HTTP POST - post", + "kind":"SPAN_KIND_SERVER", + "startTimeUnixNano":"1626717504328961000", + "endTimeUnixNano":"1626717505828961000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"208" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"POST" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"/post" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + } + ], + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABbdZuWJO/uiw==", + "spanId":"frLAE97IEMg=", + "parentSpanId":"FcfJmhEwNxM=", + "name":"HTTP GET - root", + "kind":"SPAN_KIND_SERVER", + "startTimeUnixNano":"1626717504833394000", + "endTimeUnixNano":"1626717505833394000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"/" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + } + ], + "status":{ + + } + } + ] + } + ] + }, + { + "resource":{ + "attributes":[ + { + "key":"service.name", + "value":{ + "stringValue":"lb" + } + }, + { + "key":"cluster", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"namespace", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"opencensus.exporterversion", + "value":{ + "stringValue":"Jaeger-Go-2.22.1" + } + }, + { + "key":"host.name", + "value":{ + "stringValue":"loadgen-6b59dff4c-jdkdk" + } + }, + { + "key":"ip", + "value":{ + "stringValue":"10.136.11.153" + } + }, + { + "key":"client-uuid", + "value":{ + "stringValue":"653fad9a76c115ac" + } + }, + { + "key":"container", + "value":{ + "stringValue":"loadgen" + } + }, + { + "key":"pod", + "value":{ + "stringValue":"loadgen-6b59dff4c-jdkdk" + } + } + ] + }, + "instrumentationLibrarySpans":[ + { + "instrumentationLibrary":{ + + }, + "spans":[ + { + "traceId":"AAAAAAAAAABFFdxqWiBZ9g==", + "spanId":"RRXcalogWfY=", + "name":"HTTP Client", + 
"startTimeUnixNano":"1626717505125848000", + "endTimeUnixNano":"1626717505130325000", + "attributes":[ + { + "key":"sampler.type", + "value":{ + "stringValue":"const" + } + }, + { + "key":"sampler.param", + "value":{ + "boolValue":true + } + } + ], + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABFFdxqWiBZ9g==", + "spanId":"Uoyxh+9zsBo=", + "parentSpanId":"RRXcalogWfY=", + "name":"HTTP GET", + "kind":"SPAN_KIND_CLIENT", + "startTimeUnixNano":"1626717501130383000", + "endTimeUnixNano":"1626717505130383000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"app:80" + } + }, + { + "key":"net/http.reused", + "value":{ + "boolValue":false + } + }, + { + "key":"net/http.was_idle", + "value":{ + "boolValue":false + } + } + ], + "events":[ + { + "timeUnixNano":"1626717505125868000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GetConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505125909000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSStart" + } + }, + { + "key":"host", + "value":{ + "stringValue":"app" + } + } + ] + }, + { + "timeUnixNano":"1626717505126450000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSDone" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196" + } + } + ] + }, + { + "timeUnixNano":"1626717505126455000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectStart" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505126534000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectDone" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.116.196:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505126556000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505126585000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteHeaders" + } + } + ] + }, + { + "timeUnixNano":"1626717505126586000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteRequest" + } + } + ] + }, + { + "timeUnixNano":"1626717505130263000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotFirstResponseByte" + } + } + ] + }, + { + "timeUnixNano":"1626717505130383000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ClosedBody" + } + } + ] + } + ], + "status":{ + + } + } + ] + } + ] + }, + { + "resource":{ + "attributes":[ + { + "key":"service.name", + "value":{ + "stringValue":"app" + } + }, + { + "key":"cluster", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"namespace", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"opencensus.exporterversion", + "value":{ + "stringValue":"Jaeger-Go-2.22.1" + } + }, + { + "key":"host.name", + "value":{ + "stringValue":"app-7c474df6bc-xpm6j" + } + }, + { + "key":"ip", + "value":{ + "stringValue":"10.136.11.151" + } + }, + { + "key":"client-uuid", + "value":{ + "stringValue":"264ce1d77c354156" + } + }, + { + "key":"pod", + "value":{ + "stringValue":"app-7c474df6bc-xpm6j" + } + }, + { + "key":"container", + 
"value":{ + "stringValue":"app" + } + } + ] + }, + "instrumentationLibrarySpans":[ + { + "instrumentationLibrary":{ + + }, + "spans":[ + { + "traceId":"AAAAAAAAAABFFdxqWiBZ9g==", + "spanId":"UQZfs+d8Sdw=", + "parentSpanId":"Uoyxh+9zsBo=", + "name":"HTTP GET - root", + "kind":"SPAN_KIND_SERVER", + "startTimeUnixNano":"1626717501930340000", + "endTimeUnixNano":"1626717505130340000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"/" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + } + ], + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABFFdxqWiBZ9g==", + "spanId":"H7/n/FTsqf0=", + "parentSpanId":"UQZfs+d8Sdw=", + "name":"HTTP Client", + "startTimeUnixNano":"1626717505126741000", + "endTimeUnixNano":"1626717505130038000", + "status":{ + + } + }, + { + "traceId":"AAAAAAAAAABFFdxqWiBZ9g==", + "spanId":"ERieXkZU3MU=", + "parentSpanId":"H7/n/FTsqf0=", + "name":"HTTP GET", + "kind":"SPAN_KIND_CLIENT", + "startTimeUnixNano":"1626717502630304000", + "endTimeUnixNano":"1626717505130304000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"db:80" + } + }, + { + "key":"net/http.reused", + "value":{ + "boolValue":false + } + }, + { + "key":"net/http.was_idle", + "value":{ + "boolValue":false + } + } + ], + "events":[ + { + "timeUnixNano":"1626717505126754000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GetConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505126800000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSStart" + } + }, + { + "key":"host", + "value":{ + "stringValue":"db" + } + } + ] + }, + { + "timeUnixNano":"1626717505129605000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"DNSDone" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8" + } + } + ] + }, + { + "timeUnixNano":"1626717505129609000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectStart" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505129678000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ConnectDone" + } + }, + { + "key":"network", + "value":{ + "stringValue":"tcp" + } + }, + { + "key":"addr", + "value":{ + "stringValue":"10.188.106.8:80" + } + } + ] + }, + { + "timeUnixNano":"1626717505129695000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotConn" + } + } + ] + }, + { + "timeUnixNano":"1626717505129727000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteHeaders" + } + } + ] + }, + { + "timeUnixNano":"1626717505129727000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"WroteRequest" + } + } + ] + }, + { + "timeUnixNano":"1626717505129965000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"GotFirstResponseByte" + } + } + ] + }, + { + "timeUnixNano":"1626717505130303000", + "attributes":[ + { + "key":"event", + "value":{ + "stringValue":"ClosedBody" + } + } + ] + } + ], + "status":{ + + } + } + ] + } + ] + }, + { + "resource":{ + 
"attributes":[ + { + "key":"service.name", + "value":{ + "stringValue":"db" + } + }, + { + "key":"cluster", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"namespace", + "value":{ + "stringValue":"tns-demo" + } + }, + { + "key":"opencensus.exporterversion", + "value":{ + "stringValue":"Jaeger-Go-2.22.1" + } + }, + { + "key":"host.name", + "value":{ + "stringValue":"db-7488656cb4-m8ljw" + } + }, + { + "key":"ip", + "value":{ + "stringValue":"10.136.11.152" + } + }, + { + "key":"client-uuid", + "value":{ + "stringValue":"392a5faabe967ba3" + } + }, + { + "key":"container", + "value":{ + "stringValue":"db" + } + }, + { + "key":"pod", + "value":{ + "stringValue":"db-7488656cb4-m8ljw" + } + } + ] + }, + "instrumentationLibrarySpans":[ + { + "instrumentationLibrary":{ + + }, + "spans":[ + { + "traceId":"AAAAAAAAAABFFdxqWiBZ9g==", + "spanId":"ZCqiD3w+XGc=", + "parentSpanId":"ERieXkZU3MU=", + "name":"HTTP GET - root", + "kind":"SPAN_KIND_SERVER", + "startTimeUnixNano":"1626717502629891000", + "endTimeUnixNano":"1626717505129891000", + "attributes":[ + { + "key":"http.status_code", + "value":{ + "intValue":"200" + } + }, + { + "key":"http.method", + "value":{ + "stringValue":"GET" + } + }, + { + "key":"http.url", + "value":{ + "stringValue":"/" + } + }, + { + "key":"component", + "value":{ + "stringValue":"net/http" + } + } + ], + "status":{ + + } + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/modules/generator/processor/spanmetrics/config.go b/modules/generator/processor/spanmetrics/config.go new file mode 100644 index 00000000000..898c1040483 --- /dev/null +++ b/modules/generator/processor/spanmetrics/config.go @@ -0,0 +1,24 @@ +package spanmetrics + +import ( + "flag" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + Name = "span-metrics" +) + +type Config struct { + // Buckets for latency histogram in seconds. + HistogramBuckets []float64 `yaml:"histogram_buckets"` + // Additional dimensions (labels) to be added to the metric, + // along with the default ones (service, span_name, span_kind and span_status). + Dimensions []string `yaml:"dimensions"` +} + +func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { + // TODO: Revisit this default value. 
+	cfg.HistogramBuckets = prometheus.ExponentialBuckets(0.002, 2, 12)
+}
diff --git a/modules/generator/processor/spanmetrics/spanmetrics.go b/modules/generator/processor/spanmetrics/spanmetrics.go
new file mode 100644
index 00000000000..7bdb1bb2252
--- /dev/null
+++ b/modules/generator/processor/spanmetrics/spanmetrics.go
@@ -0,0 +1,128 @@
+package spanmetrics
+
+import (
+	"context"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/prometheus/client_golang/prometheus"
+
+	gen "github.com/grafana/tempo/modules/generator/processor"
+	processor_util "github.com/grafana/tempo/modules/generator/processor/util"
+	"github.com/grafana/tempo/pkg/tempopb"
+	v1_trace "github.com/grafana/tempo/pkg/tempopb/trace/v1"
+	tempo_util "github.com/grafana/tempo/pkg/util"
+)
+
+type processor struct {
+	cfg Config
+
+	spanMetricsCallsTotal      *prometheus.CounterVec
+	spanMetricsDurationSeconds *prometheus.HistogramVec
+
+	// for testing
+	now func() time.Time
+}
+
+func New(cfg Config, tenant string) gen.Processor {
+	return &processor{
+		cfg: cfg,
+		now: time.Now,
+	}
+}
+
+func (p *processor) Name() string { return Name }
+
+func (p *processor) RegisterMetrics(reg prometheus.Registerer) error {
+	labelNames := []string{"service", "span_name", "span_kind", "span_status"}
+	if len(p.cfg.Dimensions) > 0 {
+		labelNames = append(labelNames, p.cfg.Dimensions...)
+	}
+
+	p.spanMetricsCallsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "traces",
+		Name:      "spanmetrics_calls_total",
+		Help:      "Total count of the spans",
+	}, labelNames)
+	p.spanMetricsDurationSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		Namespace: "traces",
+		Name:      "spanmetrics_duration_seconds",
+		Help:      "Latency of the spans",
+		Buckets:   p.cfg.HistogramBuckets,
+	}, labelNames)
+
+	cs := []prometheus.Collector{
+		p.spanMetricsCallsTotal,
+		p.spanMetricsDurationSeconds,
+	}
+
+	for _, c := range cs {
+		if err := reg.Register(c); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p *processor) unregisterMetrics(reg prometheus.Registerer) {
+	cs := []prometheus.Collector{
+		p.spanMetricsCallsTotal,
+		p.spanMetricsDurationSeconds,
+	}
+
+	for _, c := range cs {
+		reg.Unregister(c)
+	}
+}
+
+func (p *processor) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) error {
+	span, _ := opentracing.StartSpanFromContext(ctx, "spanmetrics.PushSpans")
+	defer span.Finish()
+
+	p.aggregateMetrics(req.Batches)
+
+	return nil
+}
+
+func (p *processor) Shutdown(ctx context.Context, reg prometheus.Registerer) error {
+	p.unregisterMetrics(reg)
+	return nil
+}
+
+func (p *processor) aggregateMetrics(resourceSpans []*v1_trace.ResourceSpans) {
+	for _, rs := range resourceSpans {
+		svcName := processor_util.GetServiceName(rs.Resource)
+		if svcName == "" {
+			continue
+		}
+		for _, ils := range rs.InstrumentationLibrarySpans {
+			for _, span := range ils.Spans {
+				p.aggregateMetricsForSpan(svcName, span)
+			}
+		}
+	}
+}
+
+func (p *processor) aggregateMetricsForSpan(svcName string, span *v1_trace.Span) {
+	latencySeconds := float64(span.GetEndTimeUnixNano()-span.GetStartTimeUnixNano()) / float64(time.Second.Nanoseconds())
+
+	labelValues := []string{svcName, span.GetName(), span.GetKind().String(), span.GetStatus().GetCode().String()}
+
+	// Build additional dimensions. Every configured dimension must contribute
+	// exactly one label value: spans that are missing an attribute get an empty
+	// value, otherwise the number of label values would no longer match the
+	// label names registered on the metric.
+	for _, d := range p.cfg.Dimensions {
+		value := ""
+		for _, attr := range span.Attributes {
+			if d == attr.Key {
+				value = attr.GetValue().GetStringValue()
+				break
+			}
+		}
+		labelValues = append(labelValues, value)
+	}
+
+	
p.spanMetricsCallsTotal.WithLabelValues(labelValues...).Inc() + p.spanMetricsDurationSeconds.WithLabelValues(labelValues...).(prometheus.ExemplarObserver).ObserveWithExemplar( + latencySeconds, prometheus.Labels{"traceID": tempo_util.TraceIDToHexString(span.TraceId)}, + ) +} diff --git a/modules/generator/processor/spanmetrics/spanmetrics_test.go b/modules/generator/processor/spanmetrics/spanmetrics_test.go new file mode 100644 index 00000000000..58e50953ef2 --- /dev/null +++ b/modules/generator/processor/spanmetrics/spanmetrics_test.go @@ -0,0 +1,126 @@ +package spanmetrics + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + gen "github.com/grafana/tempo/modules/generator/processor" + test_util "github.com/grafana/tempo/modules/generator/processor/util/test" + "github.com/grafana/tempo/pkg/tempopb" + common_v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" + trace_v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/pkg/util/test" +) + +func TestSpanMetrics(t *testing.T) { + cfg := Config{} + cfg.RegisterFlagsAndApplyDefaults("", nil) + p := New(cfg, "test") + + registry := gen.NewRegistry(nil) + err := p.RegisterMetrics(registry) + assert.NoError(t, err) + + now := time.Now() + registry.SetTimeNow(func() time.Time { + return now + }) + + // TODO give these spans some duration so we can verify latencies are recorded correctly, in fact we should also test with various span names etc. + req := test.MakeBatch(10, nil) + + err = p.PushSpans(context.Background(), &tempopb.PushSpansRequest{Batches: []*trace_v1.ResourceSpans{req}}) + assert.NoError(t, err) + + appender := &test_util.Appender{} + + collectTime := now + err = registry.Gather(appender) + assert.NoError(t, err) + + assert.False(t, appender.IsCommitted) + assert.False(t, appender.IsRolledback) + + expectedMetrics := []test_util.Metric{ + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_calls_total"}`, Value: 10}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_count"}`, Value: 10}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_sum"}`, Value: 10}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.002"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.004"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.008"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.016"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.032"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", 
__name__="traces_spanmetrics_duration_seconds_bucket", le="0.064"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.128"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.256"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.512"}`, Value: 0}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="1.024"}`, Value: 10}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="2.048"}`, Value: 10}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="4.096"}`, Value: 10}, + {Labels: `{service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="+Inf"}`, Value: 10}, + } + appender.ContainsAll(t, expectedMetrics, collectTime) +} + +func TestSpanMetrics_dimensions(t *testing.T) { + cfg := Config{} + cfg.RegisterFlagsAndApplyDefaults("", nil) + cfg.Dimensions = []string{"foo", "bar"} + p := New(cfg, "test") + + registry := gen.NewRegistry(nil) + err := p.RegisterMetrics(registry) + assert.NoError(t, err) + + now := time.Now() + registry.SetTimeNow(func() time.Time { + return now + }) + + batch := test.MakeBatch(10, nil) + for _, rs := range batch.InstrumentationLibrarySpans { + for _, s := range rs.Spans { + s.Attributes = append(s.Attributes, &common_v1.KeyValue{ + Key: "foo", + Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "foo-value"}}, + }) + s.Attributes = append(s.Attributes, &common_v1.KeyValue{ + Key: "bar", + Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "bar-value"}}, + }) + } + } + err = p.PushSpans(context.Background(), &tempopb.PushSpansRequest{Batches: []*trace_v1.ResourceSpans{batch}}) + assert.NoError(t, err) + + appender := &test_util.Appender{} + + err = registry.Gather(appender) + assert.NoError(t, err) + + assert.False(t, appender.IsCommitted) + assert.False(t, appender.IsRolledback) + + expectedMetrics := []test_util.Metric{ + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_calls_total"}`, Value: 10}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_count"}`, Value: 10}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_sum"}`, Value: 10}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", 
__name__="traces_spanmetrics_duration_seconds_bucket", le="0.002"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.004"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.008"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.016"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.032"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.064"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.128"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.256"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="0.512"}`, Value: 0}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="1.024"}`, Value: 10}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="2.048"}`, Value: 10}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="4.096"}`, Value: 10}, + {Labels: `{bar="bar-value", foo="foo-value", service="test-service", span_kind="SPAN_KIND_CLIENT", span_name="test", span_status="STATUS_CODE_OK", __name__="traces_spanmetrics_duration_seconds_bucket", le="+Inf"}`, Value: 10}, + } + appender.ContainsAll(t, expectedMetrics, now) +} diff --git a/modules/generator/processor/util/test/appender.go b/modules/generator/processor/util/test/appender.go new file mode 100644 index 00000000000..50c026619a7 --- /dev/null +++ b/modules/generator/processor/util/test/appender.go @@ -0,0 +1,140 @@ +package test + +import ( + "fmt" + "sort" + "testing" + "time" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/stretchr/testify/assert" +) + +// Appender is a storage.Appender to be used in tests. It will store appended samples and has +// test functions to verify these are correct. 
+type Appender struct {
+	IsCommitted, IsRolledback bool
+
+	samples   []Sample
+	exemplars []Exemplar
+}
+
+type Metric struct {
+	Labels string
+	Value  float64
+}
+
+type Sample struct {
+	l labels.Labels
+	t int64
+	v float64
+}
+
+type Exemplar struct {
+	l labels.Labels
+	e exemplar.Exemplar
+}
+
+var _ storage.Appender = (*Appender)(nil)
+
+func (a *Appender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
+	a.samples = append(a.samples, Sample{l, t, v})
+	return 0, nil
+}
+
+func (a *Appender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
+	a.exemplars = append(a.exemplars, Exemplar{l, e})
+	return 0, nil
+}
+
+func (a *Appender) Commit() error {
+	a.IsCommitted = true
+	return nil
+}
+
+func (a *Appender) Rollback() error {
+	a.IsRolledback = true
+	return nil
+}
+
+// Contains asserts that Appender contains expectedSample.
+func (a *Appender) Contains(t *testing.T, expectedSample Metric) {
+	assert.Greater(t, len(a.samples), 0)
+	for _, sample := range a.samples {
+		if expectedSample.Labels != sample.l.String() {
+			continue
+		}
+		assert.Equal(t, expectedSample.Value, sample.v)
+		return
+	}
+
+	t.Fatalf("could not find sample %v in Appender", expectedSample)
+}
+
+// NotContains asserts that Appender does not contain a sample with the given labels.
+func (a *Appender) NotContains(t *testing.T, labels string) {
+	for _, sample := range a.samples {
+		if labels == sample.l.String() {
+			t.Fatalf("appender contains sample %s", labels)
+			return
+		}
+	}
+}
+
+// ContainsAll asserts that Appender contains exactly the samples in
+// expectedSamples, regardless of order. All samples should have a timestamp
+// equal to timestamp, with 1 millisecond of error margin.
+func (a *Appender) ContainsAll(t *testing.T, expectedSamples []Metric, timestamp time.Time) {
+	if len(expectedSamples) > 0 {
+		assert.NotEmpty(t, a.samples)
+	}
+
+	if len(a.samples) != len(expectedSamples) {
+		t.Errorf("number of recorded samples does not match expected, got %d expected %d", len(a.samples), len(expectedSamples))
+		a.printMetrics()
+		return
+	}
+
+	sort.Slice(expectedSamples, func(i, j int) bool {
+		return expectedSamples[i].Labels < expectedSamples[j].Labels
+	})
+	sort.Slice(a.samples, func(i, j int) bool {
+		return a.samples[i].l.String() < a.samples[j].l.String()
+	})
+
+	for i, sample := range a.samples {
+		labelsEqual := assert.Equal(t, expectedSamples[i].Labels, sample.l.String())
+		if !labelsEqual {
+			// This happens when a time series is missing or incorrect. Instead of printing a wall
+			// of failed asserts as we continue iterating through the list, just dump the contents.
+			a.printMetrics()
+			return
+		}
+
+		assert.InDelta(t, timestamp.UnixMilli(), sample.t, 1, sample.l)
+		assert.Equal(t, expectedSamples[i].Value, sample.v, sample.l)
+	}
+}
+
+func (a *Appender) ContainsAllExemplars(t *testing.T, l []string, e []exemplar.Exemplar) {
+	for i, ex := range a.exemplars {
+		labelsEqual := assert.Equal(t, l[i], ex.l.String())
+		if !labelsEqual {
+			return
+		}
+
+		assert.Equal(t, e[i].Labels, ex.e.Labels)
+		assert.Equal(t, e[i].Value, ex.e.Value)
+		assert.InDelta(t, e[i].Ts, ex.e.Ts, 5)
+		assert.Equal(t, e[i].HasTs, ex.e.HasTs)
+	}
+}
+
+func (a *Appender) printMetrics() {
+	fmt.Println("Test appender contains the following metrics")
+	for i := range a.samples {
+		fmt.Printf("%s %g\n", a.samples[i].l.String(), a.samples[i].v)
+	}
+}
diff --git a/modules/generator/processor/util/util.go b/modules/generator/processor/util/util.go
new file mode 100644
index 00000000000..1221f974b9f
--- /dev/null
+++ b/modules/generator/processor/util/util.go
@@ -0,0 +1,16 @@
+package util
+
+import (
+	v1_resource "github.com/grafana/tempo/pkg/tempopb/resource/v1"
+	semconv "go.opentelemetry.io/collector/model/semconv/v1.5.0"
+)
+
+func GetServiceName(rs *v1_resource.Resource) string {
+	for _, attr := range rs.Attributes {
+		if attr.Key == semconv.AttributeServiceName {
+			return attr.Value.GetStringValue()
+		}
+	}
+
+	return ""
+}
diff --git a/modules/generator/remotewrite/appendable.go b/modules/generator/remotewrite/appendable.go
new file mode 100644
index 00000000000..51b6027eeae
--- /dev/null
+++ b/modules/generator/remotewrite/appendable.go
@@ -0,0 +1,54 @@
+package remotewrite
+
+import (
+	"context"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/prometheus/prometheus/storage"
+)
+
+// remoteWriteAppendable is a Prometheus storage.Appendable that remote writes samples and exemplars.
+type remoteWriteAppendable struct {
+	logger   log.Logger
+	tenantID string
+	cfg      *Config
+
+	// TODO add overrides/limits
+
+	metrics *Metrics
+}
+
+var _ storage.Appendable = (*remoteWriteAppendable)(nil)
+
+// NewAppendable creates a Prometheus storage.Appendable that can remote write. If
+// tenantID is not empty, it sets the X-Scope-OrgID header on every request.
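+//
+// Usage sketch (illustrative; the constructor follows below):
+//
+//	appendable := NewAppendable(cfg, logger, "tenant-1", metrics)
+//	app := appendable.Appender(ctx)
+//	_, _ = app.Append(0, lbls, timestampMs, value)
+//	_ = app.Commit() // builds the remote-write requests and sends them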
+func NewAppendable(cfg *Config, logger log.Logger, tenantID string, metrics *Metrics) storage.Appendable { + if !cfg.Enabled { + level.Info(logger).Log("msg", "remote-write is disabled") + return &NoopAppender{} + } + + return &remoteWriteAppendable{ + logger: logger, + tenantID: tenantID, + cfg: cfg, + metrics: metrics, + } +} + +func (a *remoteWriteAppendable) Appender(ctx context.Context) storage.Appender { + client, err := newRemoteWriteClient(&a.cfg.Client, a.tenantID) + if err != nil { + level.Error(a.logger).Log("msg", "error creating remote-write client; setting appender as noop", "err", err, "tenant", a.tenantID) + return &NoopAppender{} + } + + return &remoteWriteAppender{ + logger: a.logger, + ctx: ctx, + remoteWriter: client, + userID: a.tenantID, + metrics: a.metrics, + } +} diff --git a/modules/generator/remotewrite/appendable_test.go b/modules/generator/remotewrite/appendable_test.go new file mode 100644 index 00000000000..ae235f4a1e7 --- /dev/null +++ b/modules/generator/remotewrite/appendable_test.go @@ -0,0 +1,102 @@ +package remotewrite + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" + "time" + + gokitlog "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + prometheus_common_config "github.com/prometheus/common/config" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote" + "github.com/stretchr/testify/assert" +) + +func Test_remoteWriteAppendable(t *testing.T) { + theTime := time.Now() + + var capturedTimeseries []prompb.TimeSeries + server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + writeRequest, err := remote.DecodeWriteRequest(req.Body) + assert.NoError(t, err) + + capturedTimeseries = writeRequest.GetTimeseries() + })) + defer server.Close() + + url, err := url.Parse(fmt.Sprintf("http://%s/receive", server.Listener.Addr().String())) + assert.NoError(t, err) + + clientCfg := config.DefaultRemoteWriteConfig + clientCfg.URL = &prometheus_common_config.URL{URL: url} + + cfg := &Config{ + Enabled: true, + Client: clientCfg, + } + tenantID := "my-tenant" + + appendable := NewAppendable(cfg, gokitlog.NewLogfmtLogger(os.Stdout), tenantID, NewMetrics(prometheus.NewRegistry())) + + appender := appendable.Appender(context.Background()) + + _, err = appender.Append(0, labels.Labels{{Name: "label", Value: "append-before-rollback"}}, theTime.UnixMilli(), 0.1) + assert.NoError(t, err) + + // Rollback the appender, this should discard previously appended samples + err = appender.Rollback() + assert.NoError(t, err) + + err = appender.Commit() + assert.NoError(t, err) + + assert.Len(t, capturedTimeseries, 0) + + _, err = appender.Append(0, labels.Labels{{Name: "label", Value: "value"}}, theTime.UnixMilli(), 0.2) + assert.NoError(t, err) + + err = appender.Commit() + assert.NoError(t, err) + + assert.Len(t, capturedTimeseries, 1) + assert.Len(t, capturedTimeseries[0].Labels, 1) + assert.Equal(t, `name:"label" value:"value" `, capturedTimeseries[0].Labels[0].String()) + assert.Len(t, capturedTimeseries[0].Samples, 1) + assert.Equal(t, fmt.Sprintf(`value:0.2 timestamp:%d `, theTime.UnixMilli()), capturedTimeseries[0].Samples[0].String()) +} + +func Test_remoteWriteAppendable_disabled(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + t.Fatal("server should never be 
called") + })) + defer server.Close() + + url, err := url.Parse(fmt.Sprintf("http://%s/receive", server.Listener.Addr().String())) + assert.NoError(t, err) + + clientCfg := config.DefaultRemoteWriteConfig + clientCfg.URL = &prometheus_common_config.URL{URL: url} + + cfg := &Config{ + Enabled: false, + Client: clientCfg, + } + + appendable := NewAppendable(cfg, gokitlog.NewLogfmtLogger(os.Stdout), "", NewMetrics(prometheus.NewRegistry())) + + appender := appendable.Appender(context.Background()) + + _, err = appender.Append(0, labels.Labels{{Name: "label", Value: "value"}}, time.Now().UnixMilli(), 0.1) + assert.NoError(t, err) + + err = appender.Commit() + assert.NoError(t, err) +} diff --git a/modules/generator/remotewrite/appender.go b/modules/generator/remotewrite/appender.go new file mode 100644 index 00000000000..eccfee84adf --- /dev/null +++ b/modules/generator/remotewrite/appender.go @@ -0,0 +1,156 @@ +package remotewrite + +import ( + "context" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/klauspost/compress/snappy" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage" +) + +// TODO: make this configurable +var maxWriteRequestSize = 3 * 1024 * 1024 // 3MB + +// remoteWriteAppender is a storage.Appender that remote writes samples and exemplars. +type remoteWriteAppender struct { + logger log.Logger + ctx context.Context + remoteWriter *remoteWriteClient + userID string + + // TODO Loki uses util.EvictingQueue here to limit the amount of samples written per remote write request + labels [][]prompb.Label + samples []prompb.Sample + exemplarLabels [][]prompb.Label + exemplars []prompb.Exemplar + + metrics *Metrics +} + +var _ storage.Appender = (*remoteWriteAppender)(nil) + +func (a *remoteWriteAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + a.labels = append(a.labels, labelsToLabelsProto(l)) + a.samples = append(a.samples, prompb.Sample{ + Timestamp: t, + Value: v, + }) + return 0, nil +} + +func (a *remoteWriteAppender) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) { + a.exemplarLabels = append(a.exemplarLabels, labelsToLabelsProto(l)) + a.exemplars = append(a.exemplars, prompb.Exemplar{ + Labels: labelsToLabelsProto(e.Labels), + Value: e.Value, + Timestamp: e.Ts, + }) + return 0, nil +} + +func (a *remoteWriteAppender) Commit() error { + level.Debug(a.logger).Log("msg", "writing samples to remote_write target", "tenant", a.userID, "target", a.remoteWriter.Endpoint(), "count", len(a.samples)) + + if len(a.samples) == 0 { + return nil + } + + reqs := a.buildRequests() + + a.metrics.samplesSent.WithLabelValues(a.userID).Add(float64(len(a.samples))) + a.metrics.exemplarsSent.WithLabelValues(a.userID).Add(float64(len(a.exemplars))) + a.metrics.remoteWriteTotal.WithLabelValues(a.userID).Add(float64(len(reqs))) + + err := a.sendRequests(reqs) + if err != nil { + level.Error(a.logger).Log("msg", "error sending remote-write requests", "tenant", a.userID, "target", a.remoteWriter.Endpoint(), "err", err) + a.metrics.remoteWriteErrors.WithLabelValues(a.userID).Inc() + return err + } + + a.clearBuffers() + return nil +} + +// buildRequests builds a slice of prompb.WriteRequest of which each requests has a maximum size of +// maxWriteRequestSize (uncompressed). 
+func (a *remoteWriteAppender) buildRequests() []*prompb.WriteRequest { + var requests []*prompb.WriteRequest + currentRequest := &prompb.WriteRequest{} + + appendTimeSeries := func(ts prompb.TimeSeries) { + if currentRequest.Size()+ts.Size() >= maxWriteRequestSize { + requests = append(requests, currentRequest) + currentRequest = &prompb.WriteRequest{} + } + currentRequest.Timeseries = append(currentRequest.Timeseries, ts) + } + + for i, s := range a.samples { + appendTimeSeries(prompb.TimeSeries{ + Labels: a.labels[i], + Samples: []prompb.Sample{s}, + }) + } + + for i, e := range a.exemplars { + appendTimeSeries(prompb.TimeSeries{ + Labels: a.exemplarLabels[i], + Exemplars: []prompb.Exemplar{e}, + }) + } + + if len(currentRequest.Timeseries) != 0 { + requests = append(requests, currentRequest) + } + + return requests +} + +func (a *remoteWriteAppender) sendRequests(reqs []*prompb.WriteRequest) error { + for _, req := range reqs { + err := a.sendWriteRequest(req) + if err != nil { + return err + } + } + return nil +} + +func (a *remoteWriteAppender) sendWriteRequest(req *prompb.WriteRequest) error { + bytes, err := req.Marshal() + if err != nil { + return err + } + bytes = snappy.Encode(nil, bytes) + + // TODO the returned error can be of type RecoverableError with a retryAfter duration, should we do something with this? + return a.remoteWriter.Store(a.ctx, bytes) +} + +func (a *remoteWriteAppender) Rollback() error { + a.clearBuffers() + return nil +} + +func (a *remoteWriteAppender) clearBuffers() { + a.labels = nil + a.samples = nil + a.exemplars = nil + a.exemplarLabels = nil +} + +func labelsToLabelsProto(labels labels.Labels) []prompb.Label { + result := make([]prompb.Label, len(labels)) + for i, l := range labels { + result[i] = prompb.Label{ + Name: l.Name, + Value: l.Value, + } + } + return result +} diff --git a/modules/generator/remotewrite/appender_test.go b/modules/generator/remotewrite/appender_test.go new file mode 100644 index 00000000000..9bdb15115ff --- /dev/null +++ b/modules/generator/remotewrite/appender_test.go @@ -0,0 +1,104 @@ +package remotewrite + +import ( + "bytes" + "context" + "fmt" + "os" + "testing" + "time" + + gokitlog "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage/remote" + "github.com/stretchr/testify/assert" +) + +func Test_remoteWriteAppendable_splitRequests(t *testing.T) { + nowMs := time.Now().UnixMilli() + + // swap out global maxWriteRequestSize during test + originalMaxWriteRequestSize := maxWriteRequestSize + maxWriteRequestSize = 120 // roughly corresponds with 2 timeseries per request + defer func() { + maxWriteRequestSize = originalMaxWriteRequestSize + }() + + mockWriteClient := &mockWriteClient{} + + appender := &remoteWriteAppender{ + logger: gokitlog.NewLogfmtLogger(os.Stdout), + ctx: context.Background(), + remoteWriter: &remoteWriteClient{WriteClient: mockWriteClient}, + userID: "", + metrics: NewMetrics(prometheus.NewRegistry()), + } + + // Send samples + for i := 0; i < 3; i++ { + _, err := appender.Append(0, labels.Labels{{Name: "label", Value: "value"}}, nowMs, float64(i+1)) + assert.NoError(t, err) + } + // Send exemplars + for i := 0; i < 2; i++ { + _, err := appender.AppendExemplar(0, labels.Labels{{Name: "label", Value: "value"}}, exemplar.Exemplar{ + Labels: labels.Labels{{Name: "exemplarLabel", Value: 
"exemplarValue"}}, + Value: float64(i + 1), + Ts: nowMs, + HasTs: true, + }) + assert.NoError(t, err) + } + + err := appender.Commit() + assert.NoError(t, err) + + assert.Equal(t, mockWriteClient.storeInvocations, 3) + + // Verify samples + for i := 0; i < 3; i++ { + timeseries := mockWriteClient.capturedTimeseries[i] + + assert.Equal(t, `name:"label" value:"value" `, timeseries.Labels[0].String()) + assert.Equal(t, fmt.Sprintf(`value:%d timestamp:%d `, i+1, nowMs), timeseries.Samples[0].String()) + assert.Len(t, timeseries.Exemplars, 0) + } + // Verify exemplars + for i := 0; i < 2; i++ { + timeseries := mockWriteClient.capturedTimeseries[i+3] + + assert.Equal(t, `name:"label" value:"value" `, timeseries.Labels[0].String()) + assert.Len(t, timeseries.Samples, 0) + assert.Equal(t, fmt.Sprintf(`labels: value:%d timestamp:%d `, i+1, nowMs), timeseries.Exemplars[0].String()) + } +} + +type mockWriteClient struct { + storeInvocations int + capturedTimeseries []prompb.TimeSeries +} + +var _ remote.WriteClient = (*mockWriteClient)(nil) + +func (m *mockWriteClient) Name() string { + return "mockWriteClient" +} + +func (m *mockWriteClient) Endpoint() string { + return "mockEndpoint" +} + +func (m *mockWriteClient) Store(ctx context.Context, b []byte) error { + m.storeInvocations++ + + writeRequest, err := remote.DecodeWriteRequest(bytes.NewReader(b)) + if err != nil { + return err + } + m.capturedTimeseries = append(m.capturedTimeseries, writeRequest.Timeseries...) + + return nil +} diff --git a/modules/generator/remotewrite/client.go b/modules/generator/remotewrite/client.go new file mode 100644 index 00000000000..a3f489a42f4 --- /dev/null +++ b/modules/generator/remotewrite/client.go @@ -0,0 +1,59 @@ +package remotewrite + +import ( + "fmt" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/storage/remote" + + "github.com/grafana/tempo/cmd/tempo/build" +) + +const ( + userAgentHeader = "User-Agent" + xScopeOrgIDHeader = "X-Scope-Orgid" +) + +var remoteWriteUserAgent = fmt.Sprintf("tempo-remote-write/%s", build.Version) + +type remoteWriteClient struct { + remote.WriteClient +} + +// newRemoteWriteClient creates a Prometheus remote.WriteClient. If tenantID is not empty, it sets +// the X-Scope-Orgid header on every request. +func newRemoteWriteClient(cfg *config.RemoteWriteConfig, tenantID string) (*remoteWriteClient, error) { + headers := copyMap(cfg.Headers) + headers[userAgentHeader] = remoteWriteUserAgent + if tenantID != "" { + headers[xScopeOrgIDHeader] = tenantID + } + + writeClient, err := remote.NewWriteClient( + "metrics_generator", + &remote.ClientConfig{ + URL: cfg.URL, + Timeout: cfg.RemoteTimeout, + HTTPClientConfig: cfg.HTTPClientConfig, + Headers: headers, + }, + ) + if err != nil { + return nil, fmt.Errorf("could not create remote-write client for tenant: %s", tenantID) + } + + return &remoteWriteClient{ + WriteClient: writeClient, + }, nil +} + +// copyMap creates a new map containing all values from the given map. 
+func copyMap(m map[string]string) map[string]string {
+	newMap := make(map[string]string, len(m))
+
+	for k, v := range m {
+		newMap[k] = v
+	}
+
+	return newMap
+}
diff --git a/modules/generator/remotewrite/client_test.go b/modules/generator/remotewrite/client_test.go
new file mode 100644
index 00000000000..d83e6546140
--- /dev/null
+++ b/modules/generator/remotewrite/client_test.go
@@ -0,0 +1,102 @@
+package remotewrite
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strconv"
+	"testing"
+
+	prometheus_common_config "github.com/prometheus/common/config"
+	"github.com/prometheus/prometheus/config"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_remoteWriteClient(t *testing.T) {
+	var err error
+	var capturedHeaders http.Header
+	var capturedBody []byte
+
+	server := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
+		capturedHeaders = req.Header
+		capturedBody, err = io.ReadAll(req.Body)
+		assert.NoError(t, err)
+	}))
+	defer server.Close()
+
+	url, err := url.Parse(fmt.Sprintf("http://%s/receive", server.Listener.Addr().String()))
+	assert.NoError(t, err)
+
+	cfg := &config.DefaultRemoteWriteConfig
+	cfg.URL = &prometheus_common_config.URL{URL: url}
+
+	t.Run("remoteWriteClient with custom headers", func(t *testing.T) {
+		cfg.Headers = map[string]string{
+			// the User-Agent header cannot be overridden
+			userAgentHeader: "my-custom-user-agent",
+			"Authorization": "Basic *****",
+		}
+		data := []byte("test request #1")
+
+		client, err := newRemoteWriteClient(cfg, "")
+		assert.NoError(t, err)
+
+		err = client.Store(context.Background(), data)
+		assert.NoError(t, err)
+
+		assert.Equal(t, data, capturedBody)
+		expectedHeaders := http.Header{
+			"Authorization":                     {"Basic *****"},
+			"Content-Encoding":                  {"snappy"},
+			"Content-Length":                    {strconv.Itoa(len(data))},
+			"Content-Type":                      {"application/x-protobuf"},
+			userAgentHeader:                     {remoteWriteUserAgent},
+			"X-Prometheus-Remote-Write-Version": {"0.1.0"},
+		}
+		assert.Equal(t, expectedHeaders, capturedHeaders)
+	})
+
+	t.Run("remoteWriteClient with tenantID", func(t *testing.T) {
+		cfg.Headers = nil
+		data := []byte("test request #2")
+
+		clientWithXScopeOrg, err := newRemoteWriteClient(cfg, "my-tenant")
+		assert.NoError(t, err)
+
+		err = clientWithXScopeOrg.Store(context.Background(), data)
+		assert.NoError(t, err)
+
+		assert.Equal(t, data, capturedBody)
+		expectedHeaders := http.Header{
+			"Content-Encoding":                  {"snappy"},
+			"Content-Length":                    {strconv.Itoa(len(data))},
+			"Content-Type":                      {"application/x-protobuf"},
+			userAgentHeader:                     {remoteWriteUserAgent},
+			"X-Prometheus-Remote-Write-Version": {"0.1.0"},
+			xScopeOrgIDHeader:                   {"my-tenant"},
+		}
+		assert.Equal(t, expectedHeaders, capturedHeaders)
+	})
+}
+
+func Test_copyMap(t *testing.T) {
+	original := map[string]string{
+		"k1": "v1",
+		"k2": "v2",
+	}
+
+	copied := copyMap(original)
+
+	assert.Equal(t, original, copied)
+
+	copied["k2"] = "other value"
+	copied["k3"] = "v3"
+
+	assert.Len(t, original, 2)
+	assert.Equal(t, "v2", original["k2"])
+	assert.Equal(t, "", original["k3"])
+}
diff --git a/modules/generator/remotewrite/config.go b/modules/generator/remotewrite/config.go
new file mode 100644
index 00000000000..599b3b3d08e
--- /dev/null
+++ b/modules/generator/remotewrite/config.go
@@ -0,0 +1,17 @@
+package remotewrite
+
+import (
+	"flag"
+
+	"github.com/prometheus/prometheus/config"
+)
+
+type Config struct {
+	// Enable remote-write requests. If disabled, all generated metrics will be discarded.
+ Enabled bool `yaml:"enabled"` + + Client config.RemoteWriteConfig `yaml:"client"` +} + +func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { +} diff --git a/modules/generator/remotewrite/metrics.go b/modules/generator/remotewrite/metrics.go new file mode 100644 index 00000000000..880ee3161bd --- /dev/null +++ b/modules/generator/remotewrite/metrics.go @@ -0,0 +1,40 @@ +package remotewrite + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +type Metrics struct { + samplesSent *prometheus.CounterVec + exemplarsSent *prometheus.CounterVec + remoteWriteErrors *prometheus.CounterVec + remoteWriteTotal *prometheus.CounterVec +} + +// NewMetrics creates a Metrics and registers all counters with the given prometheus.Registerer. To +// avoid registering metrics twice, this method should only be called once per Registerer. +func NewMetrics(reg prometheus.Registerer) *Metrics { + return &Metrics{ + samplesSent: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_samples_sent_total", + Help: "Number of samples sent", + }, []string{"tenant"}), + exemplarsSent: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_exemplars_sent_total", + Help: "Number of exemplars sent", + }, []string{"tenant"}), + remoteWriteErrors: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_remote_write_errors", + Help: "Number of remote-write requests that failed due to error.", + }, []string{"tenant"}), + remoteWriteTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "metrics_generator_remote_write_total", + Help: "Number of remote-write requests.", + }, []string{"tenant"}), + } +} diff --git a/modules/generator/remotewrite/noop.go b/modules/generator/remotewrite/noop.go new file mode 100644 index 00000000000..ba8629f7fd7 --- /dev/null +++ b/modules/generator/remotewrite/noop.go @@ -0,0 +1,35 @@ +package remotewrite + +import ( + "context" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" +) + +// NoopAppender implements storage.Appendable and storage.Appender +type NoopAppender struct{} + +var _ storage.Appendable = (*NoopAppender)(nil) +var _ storage.Appender = (*NoopAppender)(nil) + +func (a *NoopAppender) Appender(_ context.Context) storage.Appender { + return a +} + +func (a *NoopAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { + return 0, nil +} + +func (a *NoopAppender) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { + return 0, nil +} + +func (a *NoopAppender) Commit() error { + return nil +} + +func (a *NoopAppender) Rollback() error { + return nil +} diff --git a/modules/overrides/limits.go b/modules/overrides/limits.go index 1fdb0991d64..14933f5c72e 100644 --- a/modules/overrides/limits.go +++ b/modules/overrides/limits.go @@ -55,6 +55,9 @@ type Limits struct { MaxBytesPerTrace int `yaml:"max_bytes_per_trace" json:"max_bytes_per_trace"` MaxSearchBytesPerTrace int `yaml:"max_search_bytes_per_trace" json:"max_search_bytes_per_trace"` + // Metrics-generator processor config + MetricsGeneratorProcessors ListToMap `yaml:"metrics_generator_processors" json:"metrics_generator_processors"` + // Compactor 
enforced limits. BlockRetention model.Duration `yaml:"block_retention" json:"block_retention"` diff --git a/modules/overrides/overrides.go b/modules/overrides/overrides.go index ccef6371ad0..aeab40136e5 100644 --- a/modules/overrides/overrides.go +++ b/modules/overrides/overrides.go @@ -263,6 +263,11 @@ func (o *Overrides) SearchTagsAllowList(userID string) map[string]struct{} { return o.getOverridesForUser(userID).SearchTagsAllowList.GetMap() } +// MetricsGeneratorProcessors returns the metrics-generator processors enabled for this tenant. +func (o *Overrides) MetricsGeneratorProcessors(userID string) map[string]struct{} { + return o.getOverridesForUser(userID).MetricsGeneratorProcessors.GetMap() +} + // BlockRetention is the duration of the block retention for this tenant. func (o *Overrides) BlockRetention(userID string) time.Duration { return time.Duration(o.getOverridesForUser(userID).BlockRetention) diff --git a/operations/jsonnet/microservices/config.libsonnet b/operations/jsonnet/microservices/config.libsonnet index b3f15da7d4c..862f891f25c 100644 --- a/operations/jsonnet/microservices/config.libsonnet +++ b/operations/jsonnet/microservices/config.libsonnet @@ -79,6 +79,19 @@ }, }, }, + metrics_generator: { + replicas: 0, + resources: { + requests:{ + cpu: '500m', + memory: '1Gi' + }, + limits: { + cpu: '1', + memory: '2Gi', + }, + }, + }, memcached: { replicas: 3, connection_limit: 4096, @@ -104,6 +117,7 @@ backend: error 'Must specify a backend', // gcs|s3 bucket: error 'Must specify a bucket', search_enabled: false, + metrics_generator_enabled: false, overrides_configmap_name: 'tempo-overrides', overrides+:: { diff --git a/operations/jsonnet/microservices/configmap.libsonnet b/operations/jsonnet/microservices/configmap.libsonnet index 9595ce34b2e..76c93d1eb73 100644 --- a/operations/jsonnet/microservices/configmap.libsonnet +++ b/operations/jsonnet/microservices/configmap.libsonnet @@ -4,6 +4,7 @@ tempo_config:: { search_enabled: $._config.search_enabled, + metrics_generator_enabled: $._config.metrics_generator_enabled, http_api_prefix: $._config.http_api_prefix, server: { @@ -65,6 +66,8 @@ tempo_ingester_config:: $.tempo_config {}, + tempo_metrics_generator_config:: $.tempo_config{}, + tempo_compactor_config:: $.tempo_config { compactor+: { compaction+: { @@ -126,6 +129,12 @@ 'tempo.yaml': k.util.manifestYaml($.tempo_ingester_config), }), + tempo_metrics_generator_configmap: + configMap.new('tempo-metrics-generator') + + configMap.withData({ + 'tempo.yaml': $.util.manifestYaml($.tempo_metrics_generator_config), + }), + tempo_compactor_configmap: configMap.new('tempo-compactor') + configMap.withData({ diff --git a/operations/jsonnet/microservices/generator.libsonnet b/operations/jsonnet/microservices/generator.libsonnet new file mode 100644 index 00000000000..df77ea6c880 --- /dev/null +++ b/operations/jsonnet/microservices/generator.libsonnet @@ -0,0 +1,52 @@ +{ + local k = import 'ksonnet-util/kausal.libsonnet', + local container = k.core.v1.container, + local containerPort = k.core.v1.containerPort, + local volumeMount = k.core.v1.volumeMount, + local deployment = k.apps.v1.deployment, + local volume = k.core.v1.volume, + + local target_name = 'metrics-generator', + local tempo_config_volume = 'tempo-conf', + local tempo_overrides_config_volume = 'overrides', + + tempo_metrics_generator_container:: + container.new(target_name, $._images.tempo) + + container.withPorts([ + containerPort.new('prom-metrics', $._config.port), + ]) + + container.withArgs([ + '-target=' + 
target_name, + '-config.file=/conf/tempo.yaml', + '-mem-ballast-size-mbs=' + $._config.ballast_size_mbs, + ]) + + container.withVolumeMounts([ + volumeMount.new(tempo_config_volume, '/conf'), + volumeMount.new(tempo_overrides_config_volume, '/overrides'), + ]) + + $.util.withResources($._config.metrics_generator.resources) + + $.util.readinessProbe, + + tempo_metrics_generator_deployment: + deployment.new( + target_name, + $._config.metrics_generator.replicas, + $.tempo_metrics_generator_container, + { + app: target_name, + [$._config.gossip_member_label]: 'true', + } + ) + + deployment.mixin.spec.strategy.rollingUpdate.withMaxSurge(3) + + deployment.mixin.spec.strategy.rollingUpdate.withMaxUnavailable(1) + + deployment.mixin.spec.template.metadata.withAnnotations({ + config_hash: std.md5(std.toString($.tempo_metrics_generator_configmap.data['tempo.yaml'])), + }) + + deployment.mixin.spec.template.spec.withVolumes([ + volume.fromConfigMap(tempo_config_volume, $.tempo_metrics_generator_configmap.metadata.name), + volume.fromConfigMap(tempo_overrides_config_volume, $._config.overrides_configmap_name), + ]), + + tempo_metrics_generator_service: + k.util.serviceFor($.tempo_metrics_generator_deployment), +} diff --git a/operations/jsonnet/microservices/tempo.libsonnet b/operations/jsonnet/microservices/tempo.libsonnet index 5f2b48eba6f..40d6c867602 100644 --- a/operations/jsonnet/microservices/tempo.libsonnet +++ b/operations/jsonnet/microservices/tempo.libsonnet @@ -4,6 +4,7 @@ (import 'compactor.libsonnet') + (import 'distributor.libsonnet') + (import 'ingester.libsonnet') + +(import 'generator.libsonnet') + (import 'frontend.libsonnet') + (import 'querier.libsonnet') + (import 'vulture.libsonnet') + diff --git a/operations/kube-manifests/ConfigMap-tempo-compactor.yaml b/operations/kube-manifests/ConfigMap-tempo-compactor.yaml index 82215c0e656..27451fda827 100644 --- a/operations/kube-manifests/ConfigMap-tempo-compactor.yaml +++ b/operations/kube-manifests/ConfigMap-tempo-compactor.yaml @@ -19,6 +19,7 @@ data: bind_port: 7946 join_members: - gossip-ring.tracing.svc.cluster.local:7946 + metrics_generator_enabled: false overrides: per_tenant_override_config: /overrides/overrides.yaml search_enabled: false diff --git a/operations/kube-manifests/ConfigMap-tempo-distributor.yaml b/operations/kube-manifests/ConfigMap-tempo-distributor.yaml index 13f99376c85..e37e19175a3 100644 --- a/operations/kube-manifests/ConfigMap-tempo-distributor.yaml +++ b/operations/kube-manifests/ConfigMap-tempo-distributor.yaml @@ -22,6 +22,7 @@ data: bind_port: 7946 join_members: - gossip-ring.tracing.svc.cluster.local:7946 + metrics_generator_enabled: false overrides: per_tenant_override_config: /overrides/overrides.yaml search_enabled: false diff --git a/operations/kube-manifests/ConfigMap-tempo-ingester.yaml b/operations/kube-manifests/ConfigMap-tempo-ingester.yaml index fe000f8fb5f..80e421f4952 100644 --- a/operations/kube-manifests/ConfigMap-tempo-ingester.yaml +++ b/operations/kube-manifests/ConfigMap-tempo-ingester.yaml @@ -13,6 +13,7 @@ data: bind_port: 7946 join_members: - gossip-ring.tracing.svc.cluster.local:7946 + metrics_generator_enabled: false overrides: per_tenant_override_config: /overrides/overrides.yaml search_enabled: false diff --git a/operations/kube-manifests/ConfigMap-tempo-metrics-generator.yaml b/operations/kube-manifests/ConfigMap-tempo-metrics-generator.yaml new file mode 100644 index 00000000000..7fdabd9a7b9 --- /dev/null +++ b/operations/kube-manifests/ConfigMap-tempo-metrics-generator.yaml 
@@ -0,0 +1,46 @@ +apiVersion: v1 +data: + tempo.yaml: | + compactor: {} + distributor: {} + http_api_prefix: "" + ingester: + lifecycler: + ring: + replication_factor: 3 + memberlist: + abort_if_cluster_join_fails: false + bind_port: 7946 + join_members: + - gossip-ring.tracing.svc.cluster.local:7946 + metrics_generator_enabled: false + overrides: + per_tenant_override_config: /overrides/overrides.yaml + search_enabled: false + server: + http_listen_port: 3200 + storage: + trace: + azure: + container-name: tempo + backend: gcs + blocklist_poll: "0" + cache: memcached + gcs: + bucket_name: tempo + chunk_buffer_size: 1.048576e+07 + memcached: + consistent_hash: true + host: memcached + service: memcached-client + timeout: 200ms + pool: + queue_depth: 2000 + s3: + bucket: tempo + wal: + path: /var/tempo/wal +kind: ConfigMap +metadata: + name: tempo-metrics-generator + namespace: tracing diff --git a/operations/kube-manifests/ConfigMap-tempo-querier.yaml b/operations/kube-manifests/ConfigMap-tempo-querier.yaml index 81edd976c73..4106e81604f 100644 --- a/operations/kube-manifests/ConfigMap-tempo-querier.yaml +++ b/operations/kube-manifests/ConfigMap-tempo-querier.yaml @@ -13,6 +13,7 @@ data: bind_port: 7946 join_members: - gossip-ring.tracing.svc.cluster.local:7946 + metrics_generator_enabled: false overrides: per_tenant_override_config: /overrides/overrides.yaml querier: diff --git a/operations/kube-manifests/ConfigMap-tempo-query-frontend.yaml b/operations/kube-manifests/ConfigMap-tempo-query-frontend.yaml index 8d9a7a16c21..abe8589e85a 100644 --- a/operations/kube-manifests/ConfigMap-tempo-query-frontend.yaml +++ b/operations/kube-manifests/ConfigMap-tempo-query-frontend.yaml @@ -13,6 +13,7 @@ data: bind_port: 7946 join_members: - gossip-ring.tracing.svc.cluster.local:7946 + metrics_generator_enabled: false overrides: per_tenant_override_config: /overrides/overrides.yaml search_enabled: false diff --git a/operations/kube-manifests/Deployment-compactor.yaml b/operations/kube-manifests/Deployment-compactor.yaml index f3a8220497a..08f6a1c9d20 100644 --- a/operations/kube-manifests/Deployment-compactor.yaml +++ b/operations/kube-manifests/Deployment-compactor.yaml @@ -18,7 +18,7 @@ spec: template: metadata: annotations: - config_hash: 4cb3f995688a969b5ff829176dac75a0 + config_hash: 39135b35e93010b0064838bd8f37a47f labels: app: compactor name: compactor diff --git a/operations/kube-manifests/Deployment-distributor.yaml b/operations/kube-manifests/Deployment-distributor.yaml index bce30a305ce..23bd880004d 100644 --- a/operations/kube-manifests/Deployment-distributor.yaml +++ b/operations/kube-manifests/Deployment-distributor.yaml @@ -19,7 +19,7 @@ spec: template: metadata: annotations: - config_hash: 1d576bcfcaa599ba1cac6892924b561a + config_hash: 1419befab9a6b69d88896bd1f4394654 labels: app: distributor name: distributor diff --git a/operations/kube-manifests/Deployment-metrics-generator.yaml b/operations/kube-manifests/Deployment-metrics-generator.yaml new file mode 100644 index 00000000000..233d0c67877 --- /dev/null +++ b/operations/kube-manifests/Deployment-metrics-generator.yaml @@ -0,0 +1,63 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metrics-generator + namespace: tracing +spec: + minReadySeconds: 10 + replicas: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: metrics-generator + name: metrics-generator + tempo-gossip-member: "true" + strategy: + rollingUpdate: + maxSurge: 3 + maxUnavailable: 1 + template: + metadata: + annotations: + config_hash: 
345364f17b5771460510d3d5b9028e3c + labels: + app: metrics-generator + name: metrics-generator + tempo-gossip-member: "true" + spec: + containers: + - args: + - -target=metrics-generator + - -config.file=/conf/tempo.yaml + - -mem-ballast-size-mbs=1024 + image: grafana/tempo:latest + imagePullPolicy: IfNotPresent + name: metrics-generator + ports: + - containerPort: 3200 + name: prom-metrics + readinessProbe: + httpGet: + path: /ready + port: 3200 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + volumeMounts: + - mountPath: /conf + name: tempo-conf + - mountPath: /overrides + name: overrides + volumes: + - configMap: + name: tempo-metrics-generator + name: tempo-conf + - configMap: + name: tempo-overrides + name: overrides diff --git a/operations/kube-manifests/Deployment-querier.yaml b/operations/kube-manifests/Deployment-querier.yaml index 64e07b2f334..43fabd3ae04 100644 --- a/operations/kube-manifests/Deployment-querier.yaml +++ b/operations/kube-manifests/Deployment-querier.yaml @@ -19,7 +19,7 @@ spec: template: metadata: annotations: - config_hash: aed646de3271d9fd648967f6425d56cd + config_hash: a584f6c6479c64143d0008609e0b0c25 labels: app: querier name: querier diff --git a/operations/kube-manifests/Deployment-query-frontend.yaml b/operations/kube-manifests/Deployment-query-frontend.yaml index b5f602ab8f3..b993b5055a6 100644 --- a/operations/kube-manifests/Deployment-query-frontend.yaml +++ b/operations/kube-manifests/Deployment-query-frontend.yaml @@ -18,7 +18,7 @@ spec: template: metadata: annotations: - config_hash: 5a93d7e918d69514c1d11fb588c70a7b + config_hash: 345364f17b5771460510d3d5b9028e3c labels: app: query-frontend name: query-frontend diff --git a/operations/kube-manifests/Service-metrics-generator.yaml b/operations/kube-manifests/Service-metrics-generator.yaml new file mode 100644 index 00000000000..b4192b9a4ce --- /dev/null +++ b/operations/kube-manifests/Service-metrics-generator.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: metrics-generator + name: metrics-generator + namespace: tracing +spec: + ports: + - name: metrics-generator-prom-metrics + port: 3200 + targetPort: 3200 + selector: + app: metrics-generator + name: metrics-generator + tempo-gossip-member: "true" diff --git a/operations/kube-manifests/StatefulSet-ingester.yaml b/operations/kube-manifests/StatefulSet-ingester.yaml index 72a7514b830..2b14ca2edd5 100644 --- a/operations/kube-manifests/StatefulSet-ingester.yaml +++ b/operations/kube-manifests/StatefulSet-ingester.yaml @@ -15,7 +15,7 @@ spec: template: metadata: annotations: - config_hash: 5a93d7e918d69514c1d11fb588c70a7b + config_hash: 345364f17b5771460510d3d5b9028e3c labels: app: ingester name: ingester diff --git a/operations/kube-manifests/util/jsonnetfile.lock.json b/operations/kube-manifests/util/jsonnetfile.lock.json index 366abfc3f07..ffe88cd131c 100644 --- a/operations/kube-manifests/util/jsonnetfile.lock.json +++ b/operations/kube-manifests/util/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "ksonnet-util" } }, - "version": "b102f9ac7d1290ac025c2a7ac99f7fd9a9948503", - "sum": "fFVlCoa/N0qiqTbDhZAEdRm2Vv76Z9Clxp3/haJ+PyA=" + "version": "03d32a72a2a0bf0ee00ffc853be5f07ad3bafcbe", + "sum": "JDsc/bUs5Yv1RkGKcm0hMteqCKZqemxA3qP6eiEATr8=" }, { "source": { @@ -18,7 +18,7 @@ "subdir": "memcached" } }, - "version": "b102f9ac7d1290ac025c2a7ac99f7fd9a9948503", + "version": "03d32a72a2a0bf0ee00ffc853be5f07ad3bafcbe", "sum": 
"dTOeEux3t9bYSqP2L/uCuLo/wUDpCKH4w+4OD9fePUk=" }, { diff --git a/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet b/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet index aaecdabbba8..5c785669b82 100644 --- a/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet +++ b/operations/kube-manifests/util/vendor/github.com/grafana/jsonnet-libs/ksonnet-util/util.libsonnet @@ -1,12 +1,21 @@ // util.libsonnet provides a number of useful (opinionated) shortcuts to replace boilerplate code local util(k) = { + // checkFlagsMap checks map for presence of flags that values with both with and without prefix set ('foo' and '-foo'). + checkFlagsMap(map, prefix): [ + error 'key "%(key)s" provided with value "%(value)s" but key "%(prefix)s%(key)s" was provided too with value "%(otherValue)s", if want to ignore this, set check=false in mapToFlags' % + { key: key, value: map[key], prefix: prefix, otherValue: map[prefix + key] } + for key in std.objectFields(map) + if map[key] != null && std.objectHas(map, prefix + key) && map[prefix + key] != null + ], + // mapToFlags converts a map to a set of golang-style command line flags. - mapToFlags(map, prefix='-'): [ + // if check=true, it will check for 'foo' and '-foo' presence, failing in that case. + mapToFlags(map, prefix='-', check=true): [ '%s%s=%s' % [prefix, key, map[key]] for key in std.objectFields(map) if map[key] != null - ], + ] + if check then $.checkFlagsMap(map, prefix) else [], // serviceFor create service for a given deployment. serviceFor(deployment, ignored_labels=[], nameFormat='%(container)s-%(port)s'):: diff --git a/pkg/tempopb/tempo.pb.go b/pkg/tempopb/tempo.pb.go index 2d03a3f52c5..8f0fd70df91 100644 --- a/pkg/tempopb/tempo.pb.go +++ b/pkg/tempopb/tempo.pb.go @@ -878,6 +878,51 @@ func (m *PushBytesRequest) XXX_DiscardUnknown() { var xxx_messageInfo_PushBytesRequest proto.InternalMessageInfo +type PushSpansRequest struct { + // just send entire OTel spans for now + Batches []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=batches,proto3" json:"batches,omitempty"` +} + +func (m *PushSpansRequest) Reset() { *m = PushSpansRequest{} } +func (m *PushSpansRequest) String() string { return proto.CompactTextString(m) } +func (*PushSpansRequest) ProtoMessage() {} +func (*PushSpansRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f22805646f4f62b6, []int{15} +} +func (m *PushSpansRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PushSpansRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PushSpansRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PushSpansRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PushSpansRequest.Merge(m, src) +} +func (m *PushSpansRequest) XXX_Size() int { + return m.Size() +} +func (m *PushSpansRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PushSpansRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PushSpansRequest proto.InternalMessageInfo + +func (m *PushSpansRequest) GetBatches() []*v1.ResourceSpans { + if m != nil { + return m.Batches + } + return nil +} + type TraceBytes struct { // pre-marshalled Traces Traces [][]byte `protobuf:"bytes,1,rep,name=traces,proto3" json:"traces,omitempty"` @@ -887,7 +932,7 @@ func (m *TraceBytes) Reset() { *m = 
TraceBytes{} } func (m *TraceBytes) String() string { return proto.CompactTextString(m) } func (*TraceBytes) ProtoMessage() {} func (*TraceBytes) Descriptor() ([]byte, []int) { - return fileDescriptor_f22805646f4f62b6, []int{15} + return fileDescriptor_f22805646f4f62b6, []int{16} } func (m *TraceBytes) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -940,79 +985,82 @@ func init() { proto.RegisterType((*Trace)(nil), "tempopb.Trace") proto.RegisterType((*PushResponse)(nil), "tempopb.PushResponse") proto.RegisterType((*PushBytesRequest)(nil), "tempopb.PushBytesRequest") + proto.RegisterType((*PushSpansRequest)(nil), "tempopb.PushSpansRequest") proto.RegisterType((*TraceBytes)(nil), "tempopb.TraceBytes") } func init() { proto.RegisterFile("pkg/tempopb/tempo.proto", fileDescriptor_f22805646f4f62b6) } var fileDescriptor_f22805646f4f62b6 = []byte{ - // 1045 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcd, 0x6e, 0xdb, 0x46, + // 1076 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0xdb, 0x46, 0x10, 0x36, 0x25, 0xd9, 0x32, 0xc7, 0x92, 0x7f, 0x36, 0x89, 0xcd, 0x32, 0x86, 0x6c, 0x10, 0x46, - 0xab, 0x43, 0x23, 0x27, 0x4a, 0xda, 0xb4, 0xb9, 0x14, 0x15, 0xec, 0xb6, 0x06, 0xaa, 0xc0, 0xa5, - 0x54, 0xdf, 0x57, 0xe4, 0x46, 0x26, 0x2c, 0x71, 0x19, 0x72, 0x25, 0x58, 0x7d, 0x80, 0x9e, 0x8a, + 0xab, 0x43, 0x23, 0x27, 0x4a, 0xda, 0xb4, 0xb9, 0x14, 0x15, 0xec, 0xa6, 0x06, 0xaa, 0xc0, 0xa5, + 0x5c, 0xdf, 0x57, 0xe4, 0x46, 0x26, 0x2c, 0x71, 0x19, 0x72, 0x65, 0xd8, 0x7d, 0x80, 0x9e, 0x8a, 0xa2, 0xaf, 0xd0, 0x47, 0xe8, 0x5b, 0xe4, 0xd0, 0x02, 0x39, 0x16, 0x3d, 0x04, 0x85, 0xfd, 0x1e, - 0x45, 0xb1, 0x7f, 0x14, 0x49, 0x29, 0x3e, 0xe4, 0x24, 0xce, 0x37, 0xdf, 0xce, 0xee, 0xcc, 0x7e, - 0x33, 0x2b, 0xd8, 0x8b, 0xae, 0x86, 0xc7, 0x8c, 0x8c, 0x23, 0x1a, 0x0d, 0xe4, 0x6f, 0x2b, 0x8a, - 0x29, 0xa3, 0xa8, 0xaa, 0x40, 0xfb, 0x3e, 0x8b, 0xb1, 0x47, 0x8e, 0xa7, 0x4f, 0x8e, 0xc5, 0x87, - 0x74, 0xdb, 0x8f, 0x86, 0x01, 0xbb, 0x9c, 0x0c, 0x5a, 0x1e, 0x1d, 0x1f, 0x0f, 0xe9, 0x90, 0x1e, - 0x0b, 0x78, 0x30, 0x79, 0x25, 0x2c, 0x61, 0x88, 0x2f, 0x49, 0x77, 0x7e, 0x36, 0x60, 0xbb, 0xcf, - 0x97, 0x77, 0x66, 0x67, 0x27, 0x2e, 0x79, 0x3d, 0x21, 0x09, 0x43, 0x16, 0x54, 0x45, 0xc8, 0xb3, - 0x13, 0xcb, 0x38, 0x34, 0x9a, 0x35, 0x57, 0x9b, 0xa8, 0x01, 0x30, 0x18, 0x51, 0xef, 0xaa, 0xc7, - 0x70, 0xcc, 0xac, 0xd2, 0xa1, 0xd1, 0x34, 0xdd, 0x0c, 0x82, 0x6c, 0x58, 0x17, 0xd6, 0x69, 0xe8, - 0x5b, 0x65, 0xe1, 0x4d, 0x6d, 0xb4, 0x0f, 0xe6, 0xeb, 0x09, 0x89, 0x67, 0x5d, 0xea, 0x13, 0x6b, - 0x55, 0x38, 0xe7, 0x80, 0x13, 0xc2, 0x4e, 0xe6, 0x1c, 0x49, 0x44, 0xc3, 0x84, 0xa0, 0x23, 0x58, - 0x15, 0x3b, 0x8b, 0x63, 0x6c, 0xb4, 0x37, 0x5b, 0x2a, 0xf7, 0x96, 0xa0, 0xba, 0xd2, 0x89, 0x9e, - 0x42, 0x75, 0x4c, 0x58, 0x1c, 0x78, 0x89, 0x38, 0xd1, 0x46, 0xfb, 0xa3, 0x3c, 0x8f, 0x87, 0xec, - 0x4a, 0x82, 0xab, 0x99, 0xce, 0xe7, 0x99, 0xbc, 0x95, 0x13, 0x39, 0x50, 0x7b, 0x85, 0x83, 0x11, - 0xf1, 0x3b, 0xfc, 0xcc, 0x89, 0xd8, 0xb5, 0xee, 0xe6, 0x30, 0xe7, 0xd7, 0x12, 0xd4, 0x7b, 0x04, - 0xc7, 0xde, 0xa5, 0xae, 0xd6, 0x0b, 0xa8, 0xf4, 0xf1, 0x90, 0xb3, 0xcb, 0xcd, 0x8d, 0xf6, 0x61, - 0xba, 0x77, 0x8e, 0xd5, 0xe2, 0x94, 0xd3, 0x90, 0xc5, 0xb3, 0x4e, 0xe5, 0xcd, 0xbb, 0x83, 0x15, - 0x57, 0xac, 0x41, 0x47, 0x50, 0xef, 0x06, 0xe1, 0xc9, 0x24, 0xc6, 0x2c, 0xa0, 0x61, 0x57, 0x26, - 0x50, 0x77, 0xf3, 0xa0, 0x60, 0xe1, 0xeb, 0x0c, 0xab, 0xac, 0x58, 0x59, 0x10, 0xdd, 0x87, 0xd5, - 0xef, 0x83, 0x71, 0xc0, 0xac, 0x8a, 0xf0, 0x4a, 
0x83, 0xa3, 0x89, 0xb8, 0xac, 0x55, 0x89, 0x0a, - 0x03, 0x6d, 0x43, 0x99, 0x84, 0xbe, 0xb5, 0x26, 0x30, 0xfe, 0x69, 0x3f, 0x07, 0x33, 0x3d, 0x22, - 0x77, 0x5f, 0x91, 0x99, 0xc8, 0xdf, 0x74, 0xf9, 0x27, 0x0f, 0x33, 0xc5, 0xa3, 0x09, 0x51, 0x77, - 0x2e, 0x8d, 0x17, 0xa5, 0x2f, 0x0c, 0xe7, 0xcf, 0x12, 0x20, 0x99, 0xaa, 0xa8, 0x90, 0xae, 0xca, - 0x33, 0x30, 0x13, 0x5d, 0x00, 0x75, 0x7d, 0xbb, 0xcb, 0x4b, 0xe3, 0xce, 0x89, 0x5c, 0x79, 0x42, - 0x2f, 0x67, 0x27, 0x6a, 0x23, 0x6d, 0x72, 0xf5, 0x88, 0xa3, 0x9f, 0xe3, 0x21, 0x51, 0xf9, 0xcf, - 0x01, 0x5e, 0xa1, 0x08, 0x0f, 0x49, 0xd2, 0xa7, 0x32, 0xb4, 0xaa, 0x41, 0x1e, 0xe4, 0xea, 0x24, - 0xa1, 0x47, 0xfd, 0x20, 0x1c, 0x2a, 0x01, 0xa6, 0x36, 0x8f, 0x10, 0x84, 0x3e, 0xb9, 0xe6, 0xe1, - 0x7a, 0xc1, 0x4f, 0x44, 0xd5, 0x26, 0x0f, 0x72, 0x85, 0x30, 0xca, 0xf0, 0xc8, 0x25, 0x1e, 0x8d, - 0xfd, 0xc4, 0xaa, 0x4a, 0x85, 0x64, 0x31, 0xce, 0xf1, 0x31, 0xc3, 0xa7, 0x7a, 0xa7, 0x75, 0xb1, - 0x53, 0x0e, 0xe3, 0x79, 0x4e, 0x49, 0x9c, 0x04, 0x34, 0xb4, 0x4c, 0x99, 0xa7, 0x32, 0x9d, 0x6b, - 0xd8, 0xd4, 0xd5, 0x51, 0x4d, 0xf0, 0x0c, 0xd6, 0x84, 0xce, 0xb5, 0xc2, 0xf6, 0xf3, 0xea, 0x96, - 0xec, 0x2e, 0x61, 0x98, 0xef, 0xe0, 0x2a, 0x2e, 0x7a, 0x5c, 0x6c, 0x8a, 0x62, 0xf5, 0x17, 0x3a, - 0xe2, 0x2f, 0x03, 0xee, 0x2d, 0x89, 0x58, 0x9c, 0x06, 0xe6, 0x7c, 0x1a, 0x34, 0x61, 0x2b, 0xa6, - 0x94, 0xf5, 0x48, 0x3c, 0x0d, 0x3c, 0xf2, 0x12, 0x8f, 0xb5, 0x3c, 0x8a, 0x30, 0xaf, 0x2e, 0x87, - 0x44, 0x78, 0xc1, 0x93, 0xc3, 0x21, 0x0f, 0xa2, 0x4f, 0x61, 0x47, 0x5c, 0x69, 0x3f, 0x18, 0x93, - 0x1f, 0xc3, 0xe0, 0xfa, 0x25, 0x0e, 0xa9, 0xb8, 0xc9, 0x8a, 0xbb, 0xe8, 0xe0, 0xb3, 0xc8, 0x9f, - 0xb7, 0x84, 0x94, 0x77, 0x06, 0x71, 0xfe, 0x30, 0x74, 0xa7, 0xea, 0xfe, 0x6e, 0xc2, 0x56, 0x10, - 0x26, 0x11, 0xf1, 0x18, 0xf1, 0xfb, 0xba, 0xa4, 0x7c, 0x59, 0x11, 0x46, 0x1f, 0xc3, 0x66, 0x0a, - 0x75, 0x66, 0x8c, 0xc8, 0x22, 0x56, 0xdc, 0x02, 0x9a, 0x8b, 0xa8, 0x86, 0x46, 0xb9, 0x10, 0x51, - 0xc2, 0xbc, 0x02, 0xc9, 0x55, 0x10, 0x45, 0x29, 0x4f, 0x29, 0x34, 0x07, 0x3a, 0xf7, 0x60, 0x47, - 0x1e, 0x99, 0xf7, 0xa2, 0xea, 0x0f, 0xe7, 0xb1, 0x6e, 0x30, 0x09, 0x2a, 0x59, 0xd8, 0xb0, 0xce, - 0xf0, 0x90, 0xd7, 0x4d, 0x0a, 0xc3, 0x74, 0x53, 0xdb, 0x69, 0xc3, 0x6e, 0xba, 0xe2, 0x82, 0x77, - 0x6a, 0x92, 0x1d, 0xed, 0x92, 0x95, 0x5e, 0xa6, 0x34, 0x9d, 0xe7, 0xb0, 0xb7, 0xb0, 0x46, 0x6d, - 0xb5, 0x0f, 0x26, 0xd3, 0xa0, 0xda, 0x6b, 0x0e, 0x38, 0x1d, 0x58, 0x15, 0x55, 0x43, 0x5f, 0x42, - 0x75, 0x80, 0x99, 0x77, 0x99, 0x2a, 0xf5, 0x20, 0x95, 0x9c, 0x7c, 0xa1, 0xa6, 0x4f, 0x5a, 0x2e, - 0x49, 0xe8, 0x24, 0xf6, 0x48, 0x2f, 0xc2, 0x61, 0xe2, 0x6a, 0xbe, 0xb3, 0x09, 0xb5, 0xf3, 0x49, - 0x92, 0x6a, 0xde, 0xf9, 0xdd, 0x80, 0x6d, 0x0e, 0x88, 0x2a, 0xeb, 0xb3, 0x3f, 0x4a, 0x1b, 0xa1, - 0x74, 0x58, 0x6e, 0xd6, 0x3a, 0x0f, 0xf8, 0x20, 0xfd, 0xe7, 0xdd, 0x41, 0xfd, 0x3c, 0x26, 0x78, - 0x34, 0xa2, 0x9e, 0x64, 0xeb, 0x0e, 0xf8, 0x04, 0xca, 0x81, 0xcf, 0xef, 0xe3, 0x0e, 0x2e, 0x67, - 0xa0, 0xcf, 0x00, 0xe4, 0x04, 0x3a, 0xc1, 0x0c, 0x5b, 0x95, 0xbb, 0xf8, 0x19, 0xa2, 0x73, 0x04, - 0xa0, 0x5e, 0x10, 0xae, 0x84, 0xdd, 0x5c, 0x97, 0xd6, 0xf4, 0x29, 0xda, 0xbf, 0x18, 0xb0, 0xc6, - 0x33, 0x21, 0x31, 0xfa, 0x0a, 0xcc, 0x34, 0x27, 0x34, 0x7f, 0xa3, 0x8a, 0x79, 0xda, 0x0f, 0x72, - 0xae, 0xb4, 0x26, 0x2b, 0xe8, 0x6b, 0xd8, 0x48, 0xc9, 0x17, 0xed, 0x0f, 0x09, 0xd1, 0xfe, 0xaf, - 0x04, 0xd5, 0x1f, 0x26, 0x24, 0x0e, 0x48, 0x8c, 0xbe, 0x83, 0xfa, 0x37, 0x41, 0xe8, 0xa7, 0xcf, - 0x20, 0x5a, 0xf2, 0x6e, 0xea, 0x80, 0xf6, 0x32, 0x57, 0xe6, 0x60, 0x35, 0x3d, 0xb4, 0x3c, 0x12, - 0x32, 0xf4, 0x9e, 0x49, 0x6f, 0xef, 0x2d, 0xe0, 0x69, 0x88, 0x53, 0xd8, 
0xc8, 0xbc, 0x22, 0xe8, - 0x61, 0x81, 0x99, 0x7d, 0x5b, 0xee, 0x0a, 0xf3, 0x2d, 0xc0, 0xbc, 0x57, 0x90, 0x5d, 0x20, 0x66, - 0xba, 0xca, 0x7e, 0xb8, 0xd4, 0x97, 0x06, 0xba, 0x80, 0xad, 0x42, 0x3b, 0xa0, 0x83, 0xc5, 0x15, - 0xb9, 0xe6, 0xb2, 0x0f, 0xdf, 0x4f, 0xd0, 0x71, 0x3b, 0xd6, 0x9b, 0x9b, 0x86, 0xf1, 0xf6, 0xa6, - 0x61, 0xfc, 0x7b, 0xd3, 0x30, 0x7e, 0xbb, 0x6d, 0xac, 0xbc, 0xbd, 0x6d, 0xac, 0xfc, 0x7d, 0xdb, - 0x58, 0x19, 0xac, 0x89, 0x7f, 0x64, 0x4f, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x16, 0x56, 0x2a, - 0xba, 0xfa, 0x09, 0x00, 0x00, + 0x45, 0xb1, 0xbf, 0x22, 0xe9, 0x9f, 0x43, 0x73, 0x12, 0xe7, 0x9b, 0x6f, 0x67, 0x67, 0xbe, 0x9d, + 0xd9, 0x15, 0x6c, 0x24, 0xa7, 0xa3, 0x5d, 0x46, 0x26, 0x09, 0x4d, 0x86, 0xf2, 0xb7, 0x93, 0xa4, + 0x94, 0x51, 0x54, 0x57, 0xa0, 0x7b, 0x9f, 0xa5, 0x38, 0x20, 0xbb, 0x67, 0x4f, 0x76, 0xc5, 0x87, + 0x74, 0xbb, 0x8f, 0x46, 0x11, 0x3b, 0x99, 0x0e, 0x3b, 0x01, 0x9d, 0xec, 0x8e, 0xe8, 0x88, 0xee, + 0x0a, 0x78, 0x38, 0x7d, 0x2d, 0x2c, 0x61, 0x88, 0x2f, 0x49, 0xf7, 0x7e, 0xb2, 0x60, 0xf5, 0x88, + 0x2f, 0xef, 0x5d, 0x1c, 0xec, 0xf9, 0xe4, 0xcd, 0x94, 0x64, 0x0c, 0x39, 0x50, 0x17, 0x21, 0x0f, + 0xf6, 0x1c, 0x6b, 0xdb, 0x6a, 0x37, 0x7c, 0x6d, 0xa2, 0x16, 0xc0, 0x70, 0x4c, 0x83, 0xd3, 0x01, + 0xc3, 0x29, 0x73, 0x2a, 0xdb, 0x56, 0xdb, 0xf6, 0x73, 0x08, 0x72, 0x61, 0x51, 0x58, 0xfb, 0x71, + 0xe8, 0x54, 0x85, 0xd7, 0xd8, 0x68, 0x13, 0xec, 0x37, 0x53, 0x92, 0x5e, 0xf4, 0x69, 0x48, 0x9c, + 0x79, 0xe1, 0x9c, 0x01, 0x5e, 0x0c, 0x6b, 0xb9, 0x3c, 0xb2, 0x84, 0xc6, 0x19, 0x41, 0x3b, 0x30, + 0x2f, 0x76, 0x16, 0x69, 0x2c, 0x75, 0x97, 0x3b, 0xaa, 0xf6, 0x8e, 0xa0, 0xfa, 0xd2, 0x89, 0x9e, + 0x42, 0x7d, 0x42, 0x58, 0x1a, 0x05, 0x99, 0xc8, 0x68, 0xa9, 0xfb, 0x51, 0x91, 0xc7, 0x43, 0xf6, + 0x25, 0xc1, 0xd7, 0x4c, 0xef, 0xf3, 0x5c, 0xdd, 0xca, 0x89, 0x3c, 0x68, 0xbc, 0xc6, 0xd1, 0x98, + 0x84, 0x3d, 0x9e, 0x73, 0x26, 0x76, 0x6d, 0xfa, 0x05, 0xcc, 0xfb, 0xa5, 0x02, 0xcd, 0x01, 0xc1, + 0x69, 0x70, 0xa2, 0xd5, 0x7a, 0x01, 0xb5, 0x23, 0x3c, 0xe2, 0xec, 0x6a, 0x7b, 0xa9, 0xbb, 0x6d, + 0xf6, 0x2e, 0xb0, 0x3a, 0x9c, 0xb2, 0x1f, 0xb3, 0xf4, 0xa2, 0x57, 0x7b, 0xfb, 0x7e, 0x6b, 0xce, + 0x17, 0x6b, 0xd0, 0x0e, 0x34, 0xfb, 0x51, 0xbc, 0x37, 0x4d, 0x31, 0x8b, 0x68, 0xdc, 0x97, 0x05, + 0x34, 0xfd, 0x22, 0x28, 0x58, 0xf8, 0x3c, 0xc7, 0xaa, 0x2a, 0x56, 0x1e, 0x44, 0xf7, 0x61, 0xfe, + 0xbb, 0x68, 0x12, 0x31, 0xa7, 0x26, 0xbc, 0xd2, 0xe0, 0x68, 0x26, 0x0e, 0x6b, 0x5e, 0xa2, 0xc2, + 0x40, 0xab, 0x50, 0x25, 0x71, 0xe8, 0x2c, 0x08, 0x8c, 0x7f, 0xba, 0xcf, 0xc1, 0x36, 0x29, 0x72, + 0xf7, 0x29, 0xb9, 0x10, 0xf5, 0xdb, 0x3e, 0xff, 0xe4, 0x61, 0xce, 0xf0, 0x78, 0x4a, 0xd4, 0x99, + 0x4b, 0xe3, 0x45, 0xe5, 0x0b, 0xcb, 0xfb, 0xa3, 0x02, 0x48, 0x96, 0x2a, 0x14, 0xd2, 0xaa, 0x3c, + 0x03, 0x3b, 0xd3, 0x02, 0xa8, 0xe3, 0x5b, 0xbf, 0x59, 0x1a, 0x7f, 0x46, 0xe4, 0x9d, 0x27, 0xfa, + 0xe5, 0x60, 0x4f, 0x6d, 0xa4, 0x4d, 0xde, 0x3d, 0x22, 0xf5, 0x43, 0x3c, 0x22, 0xaa, 0xfe, 0x19, + 0xc0, 0x15, 0x4a, 0xf0, 0x88, 0x64, 0x47, 0x54, 0x86, 0x56, 0x1a, 0x14, 0x41, 0xde, 0x9d, 0x24, + 0x0e, 0x68, 0x18, 0xc5, 0x23, 0xd5, 0x80, 0xc6, 0xe6, 0x11, 0xa2, 0x38, 0x24, 0xe7, 0x3c, 0xdc, + 0x20, 0xfa, 0x91, 0x28, 0x6d, 0x8a, 0x20, 0xef, 0x10, 0x46, 0x19, 0x1e, 0xfb, 0x24, 0xa0, 0x69, + 0x98, 0x39, 0x75, 0xd9, 0x21, 0x79, 0x8c, 0x73, 0x42, 0xcc, 0xf0, 0xbe, 0xde, 0x69, 0x51, 0xec, + 0x54, 0xc0, 0x78, 0x9d, 0x67, 0x24, 0xcd, 0x22, 0x1a, 0x3b, 0xb6, 0xac, 0x53, 0x99, 0xde, 0x39, + 0x2c, 0x6b, 0x75, 0xd4, 0x10, 0x3c, 0x83, 0x05, 0xd1, 0xe7, 0xba, 0xc3, 0x36, 0x8b, 0xdd, 0x2d, + 0xd9, 0x7d, 0xc2, 0x30, 0xdf, 0xc1, 0x57, 0x5c, 0xf4, 0xb8, 0x3c, 
0x14, 0x65, 0xf5, 0xaf, 0x4d, + 0xc4, 0x9f, 0x16, 0xdc, 0xbb, 0x21, 0x62, 0xf9, 0x36, 0xb0, 0x67, 0xb7, 0x41, 0x1b, 0x56, 0x52, + 0x4a, 0xd9, 0x80, 0xa4, 0x67, 0x51, 0x40, 0x5e, 0xe1, 0x89, 0x6e, 0x8f, 0x32, 0xcc, 0xd5, 0xe5, + 0x90, 0x08, 0x2f, 0x78, 0xf2, 0x72, 0x28, 0x82, 0xe8, 0x53, 0x58, 0x13, 0x47, 0x7a, 0x14, 0x4d, + 0xc8, 0x0f, 0x71, 0x74, 0xfe, 0x0a, 0xc7, 0x54, 0x9c, 0x64, 0xcd, 0xbf, 0xee, 0xe0, 0x77, 0x51, + 0x38, 0x1b, 0x09, 0xd9, 0xde, 0x39, 0xc4, 0xfb, 0xdd, 0xd2, 0x93, 0xaa, 0xe7, 0xbb, 0x0d, 0x2b, + 0x51, 0x9c, 0x25, 0x24, 0x60, 0x24, 0x3c, 0xd2, 0x92, 0xf2, 0x65, 0x65, 0x18, 0x7d, 0x0c, 0xcb, + 0x06, 0xea, 0x5d, 0x30, 0x22, 0x45, 0xac, 0xf9, 0x25, 0xb4, 0x10, 0x51, 0x5d, 0x1a, 0xd5, 0x52, + 0x44, 0x09, 0x73, 0x05, 0xb2, 0xd3, 0x28, 0x49, 0x0c, 0x4f, 0x75, 0x68, 0x01, 0xf4, 0xee, 0xc1, + 0x9a, 0x4c, 0x99, 0xcf, 0xa2, 0x9a, 0x0f, 0xef, 0xb1, 0x1e, 0x30, 0x09, 0xaa, 0xb6, 0x70, 0x61, + 0x91, 0xe1, 0x11, 0xd7, 0x4d, 0x36, 0x86, 0xed, 0x1b, 0xdb, 0xeb, 0xc2, 0xba, 0x59, 0x71, 0xcc, + 0x27, 0x35, 0xcb, 0x5f, 0xed, 0x92, 0x65, 0x0e, 0x53, 0x9a, 0xde, 0x73, 0xd8, 0xb8, 0xb6, 0x46, + 0x6d, 0xb5, 0x09, 0x36, 0xd3, 0xa0, 0xda, 0x6b, 0x06, 0x78, 0x3d, 0x98, 0x17, 0xaa, 0xa1, 0x2f, + 0xa1, 0x3e, 0xc4, 0x2c, 0x38, 0x31, 0x9d, 0xba, 0x65, 0x5a, 0x4e, 0xbe, 0x50, 0x67, 0x4f, 0x3a, + 0x3e, 0xc9, 0xe8, 0x34, 0x0d, 0xc8, 0x20, 0xc1, 0x71, 0xe6, 0x6b, 0xbe, 0xb7, 0x0c, 0x8d, 0xc3, + 0x69, 0x66, 0x7a, 0xde, 0xfb, 0xcd, 0x82, 0x55, 0x0e, 0x08, 0x95, 0x75, 0xee, 0x8f, 0xcc, 0x20, + 0x54, 0xb6, 0xab, 0xed, 0x46, 0xef, 0x01, 0xbf, 0x48, 0xff, 0x7e, 0xbf, 0xd5, 0x3c, 0x4c, 0x09, + 0x1e, 0x8f, 0x69, 0x20, 0xd9, 0x7a, 0x02, 0x3e, 0x81, 0x6a, 0x14, 0xf2, 0xf3, 0xb8, 0x83, 0xcb, + 0x19, 0xe8, 0x33, 0x00, 0x79, 0x03, 0xed, 0x61, 0x86, 0x9d, 0xda, 0x5d, 0xfc, 0x1c, 0xd1, 0xeb, + 0xcb, 0x14, 0x65, 0x25, 0x2a, 0xc5, 0x0f, 0x90, 0x60, 0x07, 0x40, 0x3d, 0x48, 0xbc, 0xb1, 0xd6, + 0x0b, 0x43, 0xdf, 0xd0, 0x45, 0x75, 0x7f, 0xb6, 0x60, 0x81, 0xef, 0x4a, 0x52, 0xf4, 0x15, 0xd8, + 0x46, 0x22, 0x34, 0x7b, 0xf2, 0xca, 0xb2, 0xb9, 0x0f, 0x0a, 0x2e, 0x23, 0xf1, 0x1c, 0xfa, 0x1a, + 0x96, 0x0c, 0xf9, 0xb8, 0xfb, 0x7f, 0x42, 0x74, 0x07, 0xb0, 0xaa, 0x86, 0xeb, 0x25, 0x89, 0x49, + 0x8a, 0x19, 0x35, 0x79, 0x89, 0xf2, 0x4a, 0x41, 0xf3, 0x5a, 0xdd, 0x1e, 0xf4, 0xdf, 0x0a, 0xd4, + 0xbf, 0x9f, 0x92, 0x34, 0x22, 0x29, 0xfa, 0x16, 0x9a, 0xdf, 0x44, 0x71, 0x68, 0x9e, 0x6a, 0x74, + 0xc3, 0xdb, 0xae, 0x03, 0xba, 0x37, 0xb9, 0x72, 0xd5, 0x36, 0xf4, 0xc5, 0x1a, 0x90, 0x98, 0xa1, + 0x5b, 0x5e, 0x23, 0x77, 0xe3, 0x1a, 0x6e, 0x42, 0xec, 0xc3, 0x52, 0xee, 0xa5, 0x43, 0x0f, 0x4b, + 0xcc, 0xfc, 0xfb, 0x77, 0x57, 0x98, 0x97, 0x00, 0xb3, 0x79, 0x46, 0x6e, 0x89, 0x98, 0x9b, 0x7c, + 0xf7, 0xe1, 0x8d, 0x3e, 0x13, 0xe8, 0x18, 0x56, 0x4a, 0x23, 0x8b, 0xb6, 0xae, 0xaf, 0x28, 0x5c, + 0x00, 0xee, 0xf6, 0xed, 0x04, 0x1d, 0xb7, 0xe7, 0xbc, 0xbd, 0x6c, 0x59, 0xef, 0x2e, 0x5b, 0xd6, + 0x3f, 0x97, 0x2d, 0xeb, 0xd7, 0xab, 0xd6, 0xdc, 0xbb, 0xab, 0xd6, 0xdc, 0x5f, 0x57, 0xad, 0xb9, + 0xe1, 0x82, 0xf8, 0xd7, 0xf8, 0xf4, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x04, 0xa5, 0x4b, 0xf7, + 0x9e, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1133,6 +1181,78 @@ var _Pusher_serviceDesc = grpc.ServiceDesc{ Metadata: "pkg/tempopb/tempo.proto", } +// MetricsGeneratorClient is the client API for MetricsGenerator service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type MetricsGeneratorClient interface { + PushSpans(ctx context.Context, in *PushSpansRequest, opts ...grpc.CallOption) (*PushResponse, error) +} + +type metricsGeneratorClient struct { + cc *grpc.ClientConn +} + +func NewMetricsGeneratorClient(cc *grpc.ClientConn) MetricsGeneratorClient { + return &metricsGeneratorClient{cc} +} + +func (c *metricsGeneratorClient) PushSpans(ctx context.Context, in *PushSpansRequest, opts ...grpc.CallOption) (*PushResponse, error) { + out := new(PushResponse) + err := c.cc.Invoke(ctx, "/tempopb.MetricsGenerator/PushSpans", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsGeneratorServer is the server API for MetricsGenerator service. +type MetricsGeneratorServer interface { + PushSpans(context.Context, *PushSpansRequest) (*PushResponse, error) +} + +// UnimplementedMetricsGeneratorServer can be embedded to have forward compatible implementations. +type UnimplementedMetricsGeneratorServer struct { +} + +func (*UnimplementedMetricsGeneratorServer) PushSpans(ctx context.Context, req *PushSpansRequest) (*PushResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PushSpans not implemented") +} + +func RegisterMetricsGeneratorServer(s *grpc.Server, srv MetricsGeneratorServer) { + s.RegisterService(&_MetricsGenerator_serviceDesc, srv) +} + +func _MetricsGenerator_PushSpans_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PushSpansRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsGeneratorServer).PushSpans(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tempopb.MetricsGenerator/PushSpans", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsGeneratorServer).PushSpans(ctx, req.(*PushSpansRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _MetricsGenerator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tempopb.MetricsGenerator", + HandlerType: (*MetricsGeneratorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "PushSpans", + Handler: _MetricsGenerator_PushSpans_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/tempopb/tempo.proto", +} + // QuerierClient is the client API for Querier service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
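The hunk above completes the generated plumbing for the new MetricsGenerator service: a client constructor (NewMetricsGeneratorClient), a server interface (MetricsGeneratorServer), and a registration helper (RegisterMetricsGeneratorServer). A minimal sketch of a process exposing this service through the generated types follows; the generatorServer type, the listen address, and the span-counting body are illustrative assumptions, not part of this change:

    package main

    import (
    	"context"
    	"log"
    	"net"

    	"google.golang.org/grpc"

    	"github.com/grafana/tempo/pkg/tempopb"
    )

    // generatorServer is a hypothetical implementation of the generated
    // MetricsGeneratorServer interface; embedding the Unimplemented struct
    // keeps it forward compatible if more RPCs are added later.
    type generatorServer struct {
    	tempopb.UnimplementedMetricsGeneratorServer
    }

    // PushSpans receives batches of raw OTel ResourceSpans. A real
    // implementation would hand them to the metrics processors; this
    // placeholder only counts the spans it received.
    func (s *generatorServer) PushSpans(ctx context.Context, req *tempopb.PushSpansRequest) (*tempopb.PushResponse, error) {
    	spans := 0
    	for _, batch := range req.Batches {
    		for _, ils := range batch.InstrumentationLibrarySpans {
    			spans += len(ils.Spans)
    		}
    	}
    	log.Printf("received %d spans", spans)
    	return &tempopb.PushResponse{}, nil
    }

    func main() {
    	lis, err := net.Listen("tcp", ":9095") // the port is an arbitrary choice for this sketch
    	if err != nil {
    		log.Fatal(err)
    	}
    	srv := grpc.NewServer()
    	tempopb.RegisterMetricsGeneratorServer(srv, &generatorServer{})
    	log.Fatal(srv.Serve(lis))
    }

The sketch only shows the gRPC surface a generator client would call; within Tempo the metrics-generator module itself provides the real implementation behind this interface.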
@@ -2013,6 +2133,43 @@ func (m *PushBytesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PushSpansRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushSpansRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PushSpansRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Batches) > 0 { + for iNdEx := len(m.Batches) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Batches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *TraceBytes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2353,6 +2510,21 @@ func (m *PushBytesRequest) Size() (n int) { return n } +func (m *PushSpansRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Batches) > 0 { + for _, e := range m.Batches { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } + return n +} + func (m *TraceBytes) Size() (n int) { if m == nil { return 0 @@ -4322,6 +4494,90 @@ func (m *PushBytesRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *PushSpansRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushSpansRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushSpansRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Batches", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Batches = append(m.Batches, &v1.ResourceSpans{}) + if err := m.Batches[len(m.Batches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTempo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTempo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TraceBytes) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/tempopb/tempo.proto b/pkg/tempopb/tempo.proto index cbc17082227..af59a2e1d15 100644 --- a/pkg/tempopb/tempo.proto +++ b/pkg/tempopb/tempo.proto @@ -11,6 +11,10 @@ service Pusher { rpc 
PushBytesV2(PushBytesRequest) returns (PushResponse) {}; // ./pkg/model/v2 } +service MetricsGenerator { + rpc PushSpans(PushSpansRequest) returns (PushResponse) {}; +} + service Querier { rpc FindTraceByID(TraceByIDRequest) returns (TraceByIDResponse) {}; rpc SearchRecent(SearchRequest) returns (SearchResponse) {}; @@ -115,6 +119,11 @@ message PushBytesRequest { repeated bytes searchData = 4 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocBytes"]; } +message PushSpansRequest { + // just send entire OTel spans for now + repeated tempopb.trace.v1.ResourceSpans batches = 1; +} + message TraceBytes { // pre-marshalled Traces repeated bytes traces = 1; diff --git a/pkg/util/test/req.go b/pkg/util/test/req.go index fbe6b02fec9..1a04d46fba5 100644 --- a/pkg/util/test/req.go +++ b/pkg/util/test/req.go @@ -2,17 +2,27 @@ package test import ( "math/rand" + "time" + "github.com/gogo/protobuf/proto" "github.com/grafana/tempo/pkg/tempopb" v1_common "github.com/grafana/tempo/pkg/tempopb/common/v1" + v1_resource "github.com/grafana/tempo/pkg/tempopb/resource/v1" v1_trace "github.com/grafana/tempo/pkg/tempopb/trace/v1" ) func MakeSpan(traceID []byte) *v1_trace.Span { + now := time.Now() s := &v1_trace.Span{ Name: "test", TraceId: traceID, SpanId: make([]byte, 8), + Kind: v1_trace.Span_SPAN_KIND_CLIENT, + Status: &v1_trace.Status{ + Code: 1, + }, + StartTimeUnixNano: uint64(now.UnixNano()), + EndTimeUnixNano: uint64(now.Add(time.Second).UnixNano()), } rand.Read(s.SpanId) return s @@ -21,7 +31,20 @@ func MakeSpan(traceID []byte) *v1_trace.Span { func MakeBatch(spans int, traceID []byte) *v1_trace.ResourceSpans { traceID = ValidTraceID(traceID) - batch := &v1_trace.ResourceSpans{} + batch := &v1_trace.ResourceSpans{ + Resource: &v1_resource.Resource{ + Attributes: []*v1_common.KeyValue{ + { + Key: "service.name", + Value: &v1_common.AnyValue{ + Value: &v1_common.AnyValue_StringValue{ + StringValue: "test-service", + }, + }, + }, + }, + }, + } var ils *v1_trace.InstrumentationLibrarySpans for i := 0; i < spans; i++ { @@ -56,6 +79,27 @@ func MakeTrace(requests int, traceID []byte) *tempopb.Trace { return trace } +func MakeTraceBytes(requests int, traceID []byte) *tempopb.TraceBytes { + trace := &tempopb.Trace{ + Batches: make([]*v1_trace.ResourceSpans, 0), + } + + for i := 0; i < requests; i++ { + trace.Batches = append(trace.Batches, MakeBatch(rand.Int()%20+1, traceID)) + } + + bytes, err := proto.Marshal(trace) + if err != nil { + panic(err) + } + + traceBytes := &tempopb.TraceBytes{ + Traces: [][]byte{bytes}, + } + + return traceBytes +} + func MakeTraceWithSpanCount(requests int, spansEach int, traceID []byte) *tempopb.Trace { trace := &tempopb.Trace{ Batches: make([]*v1_trace.ResourceSpans, 0), diff --git a/tempodb/compactor_test.go b/tempodb/compactor_test.go index f085bde5003..a1a1aa93ae6 100644 --- a/tempodb/compactor_test.go +++ b/tempodb/compactor_test.go @@ -123,8 +123,8 @@ func TestCompaction(t *testing.T) { _, err = w.CompleteBlock(head, &mockSharder{}) require.NoError(t, err) - //err = w.WriteBlock(context.Background(), complete) - //assert.NoError(t, err) + // err = w.WriteBlock(context.Background(), complete) + // assert.NoError(t, err) } rw := r.(*readerWriter) @@ -238,7 +238,7 @@ func TestSameIDCompaction(t *testing.T) { reqs := make([][]byte, 0, requestShards) for j := 0; j < requestShards; j++ { - buff, err := proto.Marshal(test.MakeTrace(1, id)) + buff, err := proto.Marshal(test.MakeTraceBytes(1, id)) require.NoError(t, err) reqs = append(reqs, buff) } @@ -523,7 +523,7 @@ 
func cutTestBlocks(t testing.TB, w Writer, tenantID string, blockCount int, reco err = head.Append( makeTraceID(i, j), body) - //[]byte{0x01, 0x02, 0x03}) + // []byte{0x01, 0x02, 0x03}) require.NoError(t, err, "unexpected error writing rec") } diff --git a/vendor/github.com/alecthomas/units/COPYING b/vendor/github.com/alecthomas/units/COPYING new file mode 100644 index 00000000000..2993ec085d3 --- /dev/null +++ b/vendor/github.com/alecthomas/units/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alecthomas/units/README.md b/vendor/github.com/alecthomas/units/README.md new file mode 100644 index 00000000000..57b458aef31 --- /dev/null +++ b/vendor/github.com/alecthomas/units/README.md @@ -0,0 +1,13 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/alecthomas/units.svg)](https://pkg.go.dev/github.com/alecthomas/units) + +# Units - Helpful unit multipliers and functions for Go + +The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. + +It allows for code like this: + +```go +n, err := ParseBase2Bytes("1KB") +// n == 1024 +n = units.Mebibyte * 512 +``` diff --git a/vendor/github.com/alecthomas/units/bytes.go b/vendor/github.com/alecthomas/units/bytes.go new file mode 100644 index 00000000000..2683620d7be --- /dev/null +++ b/vendor/github.com/alecthomas/units/bytes.go @@ -0,0 +1,135 @@ +package units + +// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, +// etc.). +type Base2Bytes int64 + +// Base-2 byte units. +const ( + Kibibyte Base2Bytes = 1024 + KiB = Kibibyte + Mebibyte = Kibibyte * 1024 + MiB = Mebibyte + Gibibyte = Mebibyte * 1024 + GiB = Gibibyte + Tebibyte = Gibibyte * 1024 + TiB = Tebibyte + Pebibyte = Tebibyte * 1024 + PiB = Pebibyte + Exbibyte = Pebibyte * 1024 + EiB = Exbibyte +) + +var ( + bytesUnitMap = MakeUnitMap("iB", "B", 1024) + oldBytesUnitMap = MakeUnitMap("B", "B", 1024) +) + +// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB +// and KiB are both 1024. +// However "kB", which is the correct SI spelling of 1000 Bytes, is rejected. +func ParseBase2Bytes(s string) (Base2Bytes, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, oldBytesUnitMap) + } + return Base2Bytes(n), err +} + +func (b Base2Bytes) String() string { + return ToString(int64(b), 1024, "iB", "B") +} + +// MarshalText implement encoding.TextMarshaler to process json/yaml. 
+func (b Base2Bytes) MarshalText() ([]byte, error) { + return []byte(b.String()), nil +} + +// UnmarshalText implement encoding.TextUnmarshaler to process json/yaml. +func (b *Base2Bytes) UnmarshalText(text []byte) error { + n, err := ParseBase2Bytes(string(text)) + *b = n + return err +} + +// Floor returns Base2Bytes with all but the largest unit zeroed out. So that e.g. 1GiB1MiB1KiB → 1GiB. +func (b Base2Bytes) Floor() Base2Bytes { + switch { + case b > Exbibyte: + return (b / Exbibyte) * Exbibyte + case b > Pebibyte: + return (b / Pebibyte) * Pebibyte + case b > Tebibyte: + return (b / Tebibyte) * Tebibyte + case b > Gibibyte: + return (b / Gibibyte) * Gibibyte + case b > Mebibyte: + return (b / Mebibyte) * Mebibyte + case b > Kibibyte: + return (b / Kibibyte) * Kibibyte + default: + return b + } +} + +var metricBytesUnitMap = MakeUnitMap("B", "B", 1000) + +// MetricBytes are SI byte units (1000 bytes in a kilobyte). +type MetricBytes SI + +// SI base-10 byte units. +const ( + Kilobyte MetricBytes = 1000 + KB = Kilobyte + Megabyte = Kilobyte * 1000 + MB = Megabyte + Gigabyte = Megabyte * 1000 + GB = Gigabyte + Terabyte = Gigabyte * 1000 + TB = Terabyte + Petabyte = Terabyte * 1000 + PB = Petabyte + Exabyte = Petabyte * 1000 + EB = Exabyte +) + +// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. +func ParseMetricBytes(s string) (MetricBytes, error) { + n, err := ParseUnit(s, metricBytesUnitMap) + return MetricBytes(n), err +} + +// TODO: represents 1000B as uppercase "KB", while SI standard requires "kB". +func (m MetricBytes) String() string { + return ToString(int64(m), 1000, "B", "B") +} + +// Floor returns MetricBytes with all but the largest unit zeroed out. So that e.g. 1GB1MB1KB → 1GB. +func (b MetricBytes) Floor() MetricBytes { + switch { + case b > Exabyte: + return (b / Exabyte) * Exabyte + case b > Petabyte: + return (b / Petabyte) * Petabyte + case b > Terabyte: + return (b / Terabyte) * Terabyte + case b > Gigabyte: + return (b / Gigabyte) * Gigabyte + case b > Megabyte: + return (b / Megabyte) * Megabyte + case b > Kilobyte: + return (b / Kilobyte) * Kilobyte + default: + return b + } +} + +// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, +// respectively. That is, KiB represents 1024 and kB, KB represent 1000. +func ParseStrictBytes(s string) (int64, error) { + n, err := ParseUnit(s, bytesUnitMap) + if err != nil { + n, err = ParseUnit(s, metricBytesUnitMap) + } + return int64(n), err +} diff --git a/vendor/github.com/alecthomas/units/doc.go b/vendor/github.com/alecthomas/units/doc.go new file mode 100644 index 00000000000..156ae386723 --- /dev/null +++ b/vendor/github.com/alecthomas/units/doc.go @@ -0,0 +1,13 @@ +// Package units provides helpful unit multipliers and functions for Go. +// +// The goal of this package is to have functionality similar to the time [1] package. +// +// +// [1] http://golang.org/pkg/time/ +// +// It allows for code like this: +// +// n, err := ParseBase2Bytes("1KB") +// // n == 1024 +// n = units.Mebibyte * 512 +package units diff --git a/vendor/github.com/alecthomas/units/si.go b/vendor/github.com/alecthomas/units/si.go new file mode 100644 index 00000000000..99b2fa4fcb0 --- /dev/null +++ b/vendor/github.com/alecthomas/units/si.go @@ -0,0 +1,50 @@ +package units + +// SI units. +type SI int64 + +// SI unit multiples. 
+const ( + Kilo SI = 1000 + Mega = Kilo * 1000 + Giga = Mega * 1000 + Tera = Giga * 1000 + Peta = Tera * 1000 + Exa = Peta * 1000 +) + +func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { + res := map[string]float64{ + shortSuffix: 1, + // see below for "k" / "K" + "M" + suffix: float64(scale * scale), + "G" + suffix: float64(scale * scale * scale), + "T" + suffix: float64(scale * scale * scale * scale), + "P" + suffix: float64(scale * scale * scale * scale * scale), + "E" + suffix: float64(scale * scale * scale * scale * scale * scale), + } + + // Standard SI prefixes use lowercase "k" for kilo = 1000. + // For compatibility, and to be fool-proof, we accept both "k" and "K" in metric mode. + // + // However, official binary prefixes are always capitalized - "KiB" - + // and we specifically never parse "kB" as 1024B because: + // + // (1) people pedantic enough to use lowercase according to SI unlikely to abuse "k" to mean 1024 :-) + // + // (2) Use of capital K for 1024 was an informal tradition predating IEC prefixes: + // "The binary meaning of the kilobyte for 1024 bytes typically uses the symbol KB, with an + // uppercase letter K." + // -- https://en.wikipedia.org/wiki/Kilobyte#Base_2_(1024_bytes) + // "Capitalization of the letter K became the de facto standard for binary notation, although this + // could not be extended to higher powers, and use of the lowercase k did persist.[13][14][15]" + // -- https://en.wikipedia.org/wiki/Binary_prefix#History + // See also the extensive https://en.wikipedia.org/wiki/Timeline_of_binary_prefixes. + if scale == 1024 { + res["K"+suffix] = float64(scale) + } else { + res["k"+suffix] = float64(scale) + res["K"+suffix] = float64(scale) + } + return res +} diff --git a/vendor/github.com/alecthomas/units/util.go b/vendor/github.com/alecthomas/units/util.go new file mode 100644 index 00000000000..6527e92d164 --- /dev/null +++ b/vendor/github.com/alecthomas/units/util.go @@ -0,0 +1,138 @@ +package units + +import ( + "errors" + "fmt" + "strings" +) + +var ( + siUnits = []string{"", "K", "M", "G", "T", "P", "E"} +) + +func ToString(n int64, scale int64, suffix, baseSuffix string) string { + mn := len(siUnits) + out := make([]string, mn) + for i, m := range siUnits { + if n%scale != 0 || i == 0 && n == 0 { + s := suffix + if i == 0 { + s = baseSuffix + } + out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) + } + n /= scale + if n == 0 { + break + } + } + return strings.Join(out, "") +} + +// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 +var errLeadingInt = errors.New("units: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. +func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x >= (1<<63-10)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + } + return x, s[i:], nil +} + +func ParseUnit(s string, unitMap map[string]float64) (int64, error) { + // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ + orig := s + f := float64(0) + neg := false + + // Consume [-+]? + if s != "" { + c := s[0] + if c == '-' || c == '+' { + neg = c == '-' + s = s[1:] + } + } + // Special case: if all that is left is "0", this is zero. 
+ if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("units: invalid " + orig) + } + for s != "" { + g := float64(0) // this element of the sequence + + var x int64 + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { + return 0, errors.New("units: invalid " + orig) + } + // Consume [0-9]* + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + g = float64(x) + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' { + s = s[1:] + pl := len(s) + x, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("units: invalid " + orig) + } + scale := 1.0 + for n := pl - len(s); n > 0; n-- { + scale *= 10 + } + g += float64(x) / scale + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. ".s" or "-.s") + return 0, errors.New("units: invalid " + orig) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || ('0' <= c && c <= '9') { + break + } + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("units: unknown unit " + u + " in " + orig) + } + + f += g * unit + } + + if neg { + f = -f + } + if f < float64(-1<<63) || f > float64(1<<63-1) { + return 0, errors.New("units: overflow parsing unit") + } + return int64(f), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 00000000000..36a915efea8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,232 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. 
This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. 
+ ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", r.Error) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 00000000000..7d50b1557cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. 
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 00000000000..ab69c7a6f38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 00000000000..92af5b7250a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,188 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. 
+ // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(ctx, m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 00000000000..785f30d8e6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,210 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." 
+// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 00000000000..e6248360029 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,426 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. 
+ + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. + + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. 
+ ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. 
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. +func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go new file mode 100644 index 00000000000..18c940ab3c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go @@ -0,0 +1,60 @@ +// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token. 
+// +// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider +// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by +// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in +// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned. +// +// Loading AWS SSO credentials with the AWS shared configuration file +// +// You can use configure AWS SSO credentials from the AWS shared configuration file by +// providing the specifying the required keys in the profile: +// +// sso_account_id +// sso_region +// sso_role_name +// sso_start_url +// +// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target +// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be +// provided, or an error will be returned. +// +// [profile devsso] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_role_name = SSOReadOnlyRole +// sso_region = us-east-1 +// sso_account_id = 123456789012 +// +// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to +// retrieve credentials. For example: +// +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// Profile: "devsso", +// }) +// if err != nil { +// return err +// } +// +// Programmatically loading AWS SSO credentials directly +// +// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information +// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache. 
+// +// svc := sso.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region +// }) +// +// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") +// +// credentials, err := provider.Get() +// if err != nil { +// return err +// } +// +// Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go new file mode 100644 index 00000000000..d4df39a7a22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go new file mode 100644 index 00000000000..eb48f61e5bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go @@ -0,0 +1,7 @@ +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("USERPROFILE") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go new file mode 100644 index 00000000000..6eda2a5557f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -0,0 +1,180 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. 
+ StartURL string +} + +// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...) +} + +// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured +// for the AWS Region where the AWS SSO user portal is located. +func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials { + p := &Provider{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal +// by exchanging the accessToken present in ~/.aws/sso/cache. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + tokenFile, err := loadTokenFile(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + + output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: &tokenFile.AccessToken, + AccountId: &p.AccountID, + RoleName: &p.RoleName, + }) + if err != nil { + return credentials.Value{}, err + } + + expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC() + p.SetExpiration(expireTime, 0) + + return credentials.Value{ + AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.StringValue(output.RoleCredentials.SessionToken), + ProviderName: ProviderName, + }, nil +} + +func getCacheFileName(url string) (string, error) { + hash := sha1.New() + _, err := hash.Write([]byte(url)) + if err != nil { + return "", err + } + return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil +} + +type rfc3339 time.Time + +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + + if err := json.Unmarshal(bytes, &value); err != nil { + return err + } + + parse, err := time.Parse(time.RFC3339, value) + if err != nil { + return fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + *r = rfc3339(parse) + + return nil +} + +type token struct { + AccessToken string `json:"accessToken"` + ExpiresAt rfc3339 `json:"expiresAt"` + Region string `json:"region,omitempty"` + StartURL string `json:"startUrl,omitempty"` +} + +func (t token) Expired() bool { + return nowTime().Round(0).After(time.Time(t.ExpiresAt)) +} + +func loadTokenFile(startURL string) (t token, err error) { + key, err := getCacheFileName(startURL) + if err != nil { + return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) + } + + fileBytes, err := 
ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
+	if err != nil {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+	}
+
+	if err := json.Unmarshal(fileBytes, &t); err != nil {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+	}
+
+	if len(t.AccessToken) == 0 {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+	}
+
+	if t.Expired() {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+	}
+
+	return t, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000000..260a37cbbab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,367 @@
+/*
+Package stscreds provides credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not need direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
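+
+Because TokenProvider is a plain func() (string, error) value, any source of
+token codes can be plugged in. A minimal sketch with a custom prompt (the
+readTokenFromApp helper is hypothetical):
+
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = func() (string, error) {
+			// readTokenFromApp is a hypothetical helper, e.g. fetching
+			// the code from your application's own UI.
+			return readTokenFromApp()
+		}
+	})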
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+type assumeRolerWithContext interface {
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configures assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string + + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. 
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
+	// With the default MaxJitterFrac value of 0, no jitter will be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role session will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role session will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   svc,
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
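+//
+// Retrieve is typically not called directly. Wrapping the provider in a
+// credentials.Credentials value handles caching and refresh; a minimal
+// sketch (p is an existing *AssumeRoleProvider):
+//
+//	creds := credentials.NewCredentials(p)
+//	v, err := creds.Get()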
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000000..cefe2a76d4d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,154 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. 
+var now = time.Now
+
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+	FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+	data, err := ioutil.ReadFile(string(f))
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", f)
+		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+	return data, nil
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// Duration the STS credentials will be valid for. Truncated to seconds.
+	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+	// expiry duration. See
+	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+	// for more information.
+	Duration time.Duration
+
+	// The amount of time the credentials will be refreshed before they expire.
+	// This is useful to refresh credentials before they expire, reducing the
+	// risk of requests failing with expired credentials. If unset, will
+	// default to no expiry window.
+	ExpiryWindow time.Duration
+
+	client stsiface.STSAPI
+
+	tokenFetcher    TokenFetcher
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role ARN, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path))
+}
+
+// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI and a TokenFetcher
+func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
+	return &WebIdentityRoleProvider{
+		client:          svc,
+		tokenFetcher:    tokenFetcher,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+}
+
+// Retrieve will attempt to assume a role using the token located at the
+// path specified by 'WebIdentityTokenFilePath'; if that path is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to assume a role using the token located at the
+// path specified by 'WebIdentityTokenFilePath'; if that path is empty an
+// error will be returned.
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) + if err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + + var duration *int64 + if p.Duration != 0 { + duration = aws.Int64(int64(p.Duration / time.Second)) + } + + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + DurationSeconds: duration, + }) + + req.SetContext(ctx) + + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. + req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 00000000000..25a66d1dda2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. +// +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. 
+//
+//	sess, err := session.NewSession(&aws.Config{})
+//	if err != nil {
+//		panic(fmt.Errorf("failed loading session: %v", err))
+//	}
+//
+//	// Add CSM client's metric publishing request handlers to the SDK's
+//	// Session Configuration.
+//	r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled via the
+// Start function or the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//	// Get the CSM client Reporter.
+//	r := csm.Get()
+//
+//	// Will pause monitoring
+//	r.Pause()
+//	resp, err = client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
+//
+//	// Resume monitoring
+//	r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 00000000000..4b19e2800e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+//	r, err := csm.Start("clientID", "127.0.0.1:31000")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//	sess := session.NewSession()
+//	r.InjectHandlers(sess.Handlers)
+//
+//	svc := s3.New(sess)
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
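+//
+// A nil-safe usage sketch:
+//
+//	if r := csm.Get(); r != nil {
+//		r.Pause()
+//	}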
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000000..5bacc791a1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) 
+ m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 00000000000..82a3e345e93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,55 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused *int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + paused: new(int64), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 00000000000..54a99280ce9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 00000000000..835bcd49cba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,264 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+//
+//	// Start must be called in order to inject the correct handlers
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//
+//	sess := session.NewSession()
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	// create a new service client with our client side metric session
+//	svc := s3.New(sess)
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 00000000000..23bb639e018
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
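+//
+// For example, to retrieve a fresh copy of the default config and handlers:
+//
+//	d := defaults.Get()
+//	cfg, handlers := d.Config, d.Handlers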
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an existing service
+// client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an existing
+// service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an existing service
+// client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, use different environment variables for legacy reasons) but still
+// fall back on the default chain of providers. This allows that default
+// chain to be automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
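+//
+// For example, a sketch of using it to build a custom credential chain:
+//
+//	provider := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())
+//	creds := credentials.NewCredentials(provider)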
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 00000000000..ca0ee1dcc78 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 00000000000..69fa63dc08f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,250 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/latest/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + +// GetMetadata uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. 
+func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/latest/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. 
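+//
+// For example (assuming an existing session value named sess):
+//
+//	svc := ec2metadata.New(sess)
+//	region, err := svc.Region()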
+func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 00000000000..df63bade104 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,245 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. 
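+//
+// For example, a test environment could disable the client entirely before
+// constructing it (a sketch; sess is assumed to already exist):
+//
+//	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")
+//	svc := ec2metadata.New(sess) // all requests now fail without being sent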
+package ec2metadata
+
+import (
+	"bytes"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+	// ServiceName is the name of the service.
+	ServiceName          = "ec2metadata"
+	disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+	// Headers for Token and TTL
+	ttlHeader   = "x-aws-ec2-metadata-token-ttl-seconds"
+	tokenHeader = "x-aws-ec2-metadata-token"
+
+	// Named Handler constants
+	fetchTokenHandlerName          = "FetchTokenHandler"
+	unmarshalMetadataHandlerName   = "unmarshalMetadataHandler"
+	unmarshalTokenHandlerName      = "unmarshalTokenHandler"
+	enableTokenProviderHandlerName = "enableTokenProviderHandler"
+
+	// TTL constants
+	defaultTTL          = 21600 * time.Second
+	ttlExpirationWindow = 30 * time.Second
+)
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//	// Create an EC2Metadata client from just a session.
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create an EC2Metadata client with additional configuration
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
+// client is able to communicate with the EC2 IMDS API.
+//
+// If an unmodified HTTP client from the stdlib default is provided, or no
+// client at all, the EC2Metadata client's HTTP client timeout will be
+// shortened. To disable this override, set
+// Config.EC2MetadataDisableTimeoutOverride to true. The override is enabled
+// by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 1 * time.Second,
+		}
+		// max number of retries on the client operation
+		cfg.MaxRetries = aws.Int(2)
+	}
+
+	if u, err := url.Parse(endpoint); err == nil {
+		// Remove path from the endpoint since it will be added by requests.
+		// This is an artifact of the SDK adding `/latest` to the endpoint for
+		// EC2 IMDS, but this is now moved to the operation definition.
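+		// For example, a hypothetical configured endpoint of
+		// "http://169.254.169.254/latest" is reduced here to
+		// "http://169.254.169.254"; operations then append their own paths
+		// such as /latest/meta-data.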
+ u.Path = "" + u.RawPath = "" + endpoint = u.String() + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests. + if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +type tokenOutput struct { + Token string + TTL time.Duration +} + +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + 
awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.NewRequestFailure( + awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil), + r.HTTPResponse.StatusCode, r.RequestID) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 00000000000..4b29f190bf9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,93 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(r.Context(), t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
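+	// On success, every subsequent request carries the IMDSv2 header, e.g.
+	// (token value hypothetical):
+	//
+	//	x-aws-ec2-metadata-token: AQAEAH4...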
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + t.token.Store(ec2Token{}) + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000000..3efdac29ff4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,290 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. +var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. 
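+		// A hypothetical ~/.aws/config pairing for this case:
+		//
+		//	[profile svc]
+		//	role_arn = arn:aws:iam::123456789012:role/svc
+		//	source_profile = base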
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. It may be wrapped with another assume
+		// role via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+		)
+
+	case sharedCfg.hasSSOConfiguration():
+		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+	default:
+		// Fallback to default credentials provider, include mock errors for
+		// the credential chain so the user can identify why credentials
+		// failed to be retrieved.
+		creds = credentials.NewCredentials(&credentials.ChainProvider{
+			VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+			Providers: []credentials.Provider{
+				&credProviderError{
+					Err: awserr.New("EnvAccessKeyNotFound",
+						"failed to find credentials in the environment.", nil),
+				},
+				&credProviderError{
+					Err: awserr.New("SharedCredsLoad",
+						fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+				},
+				defaults.RemoteCredProvider(*cfg, handlers),
+			},
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if len(sharedCfg.RoleARN) > 0 {
+		cfgCp := *cfg
+		cfgCp.Credentials = creds
+		return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+	}
+
+	return creds, nil
+}
+
+func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
+	if err := sharedCfg.validateSSOConfiguration(); err != nil {
+		return nil, err
+	}
+
+	cfgCopy := cfg.Copy()
+	cfgCopy.Region = &sharedCfg.SSORegion
+
+	return ssocreds.NewCredentials(
+		&Session{
+			Config:   cfgCopy,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.SSOAccountID,
+		sharedCfg.SSORoleName,
+		sharedCfg.SSOStartURL,
+	), nil
+}
+
+// valid credential source values
+const (
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+func resolveCredsFromSource(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	case credSourceEnvironment:
+		creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+	case credSourceECSContainer:
+		if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+			return nil, ErrSharedConfigECSContainerEnvVarEmpty
+		}
+
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	default:
+		return nil, ErrSharedConfigInvalidCredSource
+	}
+
+	return creds, nil
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+	handlers request.Handlers,
+	sharedCfg sharedConfig,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	if len(sharedCfg.MFASerial) != 0 &&
+		sessOpts.AssumeRoleTokenProvider == nil {
+		// AssumeRole Token provider is required if doing Assume Role
+		// with MFA.
+		return nil, AssumeRoleTokenProviderNotSetError{}
+	}
+
+	return stscreds.NewCredentials(
+		&Session{
+			Config:   &cfg,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.RoleARN,
+		func(opt *stscreds.AssumeRoleProvider) {
+			opt.RoleSessionName = sharedCfg.RoleSessionName
+
+			if sessOpts.AssumeRoleDuration == 0 &&
+				sharedCfg.AssumeRoleDuration != nil &&
+				*sharedCfg.AssumeRoleDuration/time.Minute > 15 {
+				opt.Duration = *sharedCfg.AssumeRoleDuration
+			} else if sessOpts.AssumeRoleDuration != 0 {
+				opt.Duration = sessOpts.AssumeRoleDuration
+			}
+
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				opt.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) > 0 {
+				opt.SerialNumber = aws.String(sharedCfg.MFASerial)
+				opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+			}
+		},
+	), nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session, when the AssumeRoleTokenProvider option is not set but the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+	return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error.
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+	Err error
+}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
new file mode 100644
index 00000000000..4390ad52f49
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
@@ -0,0 +1,28 @@
+//go:build go1.13
+// +build go1.13
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
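+// The values below appear to track net/http's DefaultTransport settings, so
+// that effectively only the TLS configuration (the injected RootCAs) differs
+// from stock behavior.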
+func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go new file mode 100644 index 00000000000..668565bea0c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go @@ -0,0 +1,27 @@ +//go:build !go1.13 && go1.7 +// +build !go1.13,go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go new file mode 100644 index 00000000000..e101aa6b6c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go @@ -0,0 +1,23 @@ +//go:build !go1.6 && go1.5 +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go new file mode 100644 index 00000000000..b5fcbe0d1e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go @@ -0,0 +1,24 @@ +//go:build !go1.7 && go1.6 +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 00000000000..ff3cc012ae3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,367 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. 
Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: + + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) + +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. + + // Create Session + sess, err := session.NewSession() + + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.NewSession() + sess, err := session.NewSessionWithOptions(session.Options{ + // Options + }) + + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", + + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, + + // Force enable Shared Config support + SharedConfigState: session.SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. 
+ sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Params: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Shared Config Fields + +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +option is used to create the Session the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. + +If both config files are present the configuration from both files will be +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). + +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. 
+ + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Custom Shared Config and Credential Files + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Custom CA Bundle + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. + +Custom Client TLS Certificate + +The SDK supports the environment and session option being configured with +Client TLS certificates that are sent as a part of the client's TLS handshake +for client authentication. If used, both Cert and Key values are required. If +one is missing, or either fail to load the contents of the file an error will +be returned. + +HTTP Client's Transport concrete implementation must be a http.Transport +or creating the session will fail. + + AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key + AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert + +This can also be configured via the session.Options ClientTLSCert and ClientTLSKey. 
+ + sess, err := session.NewSessionWithOptions(session.Options{ + ClientTLSCert: myCertFile, + ClientTLSKey: myKeyFile, + }) + +Custom EC2 IMDS Endpoint + +The endpoint of the EC2 IMDS client can be configured via the environment +variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +Session. See Options.EC2IMDSEndpoint for more details. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + +If using an URL with an IPv6 address literal, the IPv6 address +component must be enclosed in square brackets. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + +The custom EC2 IMDS endpoint can also be specified via the Session options. + + sess, err := session.NewSessionWithOptions(session.Options{ + EC2MetadataEndpoint: "http://[::1]", + }) + +FIPS and DualStack Endpoints + +The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack. + +You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config), +or programmatically. + +To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable +or disable FIPS endpoint resolution. + + AWS_USE_FIPS_ENDPOINT=true + +To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable +or disable FIPS endpoint resolution. + + [profile myprofile] + region=us-west-2 + use_fips_endpoint=true + +To configure a FIPS endpoint programmatically + + // Option 1: Configure it on a session for all clients + sess, err := session.NewSessionWithOptions(session.Options{ + UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, + }) + if err != nil { + // handle error + } + + client := s3.New(sess) + + // Option 2: Configure it per client + sess, err := session.NewSession() + if err != nil { + // handle error + } + + client := s3.New(sess, &aws.Config{ + UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled, + }) + +You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config), +or programmatically. + +To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to +enable or disable DualStack endpoint resolution. + + AWS_USE_DUALSTACK_ENDPOINT=true + +To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable +or disable DualStack endpoint resolution. 
+ + [profile myprofile] + region=us-west-2 + use_dualstack_endpoint=true + +To configure a DualStack endpoint programmatically + + // Option 1: Configure it on a session for all clients + sess, err := session.NewSessionWithOptions(session.Options{ + UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, + }) + if err != nil { + // handle error + } + + client := s3.New(sess) + + // Option 2: Configure it per client + sess, err := session.NewSession() + if err != nil { + // handle error + } + + client := s3.New(sess, &aws.Config{ + UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, + }) +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 00000000000..d6fa24776cf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,471 @@ +package session + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. 
If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + // Sets the TLC client certificate that should be used by the SDK's HTTP transport + // when making requests. The certificate must be paired with a TLS client key file. + // + // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert + ClientTLSCert string + + // Sets the TLC client key that should be used by the SDK's HTTP transport + // when making requests. The key must be paired with a TLS client certificate file. + // + // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key + ClientTLSKey string + + csmEnabled string + CSMEnabled *bool + CSMPort string + CSMHost string + CSMClientID string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string + + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. + // + // AWS_STS_REGIONAL_ENDPOINTS=regional + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // This can take value as `regional` or `legacy` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool + + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. 
+ // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // AWS_USE_DUALSTACK_ENDPOINT=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // AWS_USE_FIPS_ENDPOINT=true + UseFIPSEndpoint endpoints.FIPSEndpointState +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } + ec2IMDSEndpointEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT", + } + ec2IMDSEndpointModeEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", + } + useCABundleKey = []string{ + "AWS_CA_BUNDLE", + } + useClientTLSCert = []string{ + "AWS_SDK_GO_CLIENT_TLS_CERT", + } + useClientTLSKey = []string{ + "AWS_SDK_GO_CLIENT_TLS_KEY", + } + awsUseDualStackEndpoint = []string{ + "AWS_USE_DUALSTACK_ENDPOINT", + } + awsUseFIPSEndpoint = []string{ + "AWS_USE_FIPS_ENDPOINT", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() (envConfig, error) { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. 
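+//
+// As an illustration of the key-list precedence used by both loaders (the
+// first non-empty variable in a list wins; values hypothetical):
+//
+//	AWS_REGION=us-west-2         <- used
+//	AWS_DEFAULT_REGION=us-east-1 <- ignored while AWS_REGION is set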
+func loadSharedEnvConfig() (envConfig, error) { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + setFromEnvVal(&cfg.CustomCABundle, useCABundleKey) + setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert) + setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey) + + var err error + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) + } + } + + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { + return envConfig{}, err + } + + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, 
awsUseDualStackEndpoint); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { + return cfg, err + } + + return cfg, nil +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) != 0 { + *dst = v + break + } + } +} + +func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + if err := mode.SetFromString(value); err != nil { + return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) + } + return nil + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 00000000000..ebace4bb79d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,992 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" + + // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle. 
+ ErrCodeLoadCustomCABundle = "LoadCustomCABundleError" + + // ErrCodeLoadClientTLSCert error code for unable to load client TLS + // certificate or key + ErrCodeLoadClientTLSCert = "LoadClientTLSCertError" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers + + options Options +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg, envErr := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. 
" + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + + return s + } + + s := deprecatedNewSession(envCfg, cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). 
+ // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override the environment variables AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommended to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role with MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + + // Reader for a custom Certificate Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // HTTP Client's Transport concrete implementation must be a http.Transport + // or creating the session will fail. + // + // If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // Can also be specified via the environment variable: + // + // AWS_CA_BUNDLE=$HOME/ca_bundle + // + // Can also be specified via the shared config field: + // + // ca_bundle = $HOME/ca_bundle + CustomCABundle io.Reader + + // Reader for the TLS client certificate that should be used by the SDK's + // HTTP transport when making requests. The certificate must be paired with + // a TLS client key file. Will be ignored if both are not provided. + // + // HTTP Client's Transport concrete implementation must be a http.Transport + // or creating the session will fail. 
+ // + // Can also be specified via the environment variable: + // + // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert + ClientTLSCert io.Reader + + // Reader for the TLS client key that should be used by the SDK's HTTP + // transport when making requests. The key must be paired with a TLS client + // certificate file. Will be ignored if both are not provided. + // + // HTTP Client's Transport concrete implementation must be a http.Transport + // or creating the session will fail. + // + // Can also be specified via the environment variable: + // + // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key + ClientTLSKey io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers + + // Allows specifying a custom endpoint to be used by the EC2 IMDS client + // when making requests to the EC2 IMDS API. The endpoint value should + // include the URI scheme. If the scheme is not present it will be defaulted to http. + // + // If unset, the EC2 IMDS client will use its default endpoint. + // + // Can also be specified via the environment variable, + // AWS_EC2_METADATA_SERVICE_ENDPOINT. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + // + // If using a URL with an IPv6 address literal, the IPv6 address + // component must be enclosed in square brackets. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. 
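// As an additional hedged sketch beyond the examples below, configuration can
// also be loaded from an explicit list of shared config files; the profile
// name and file path here are illustrative only:
//
//	sess, err := session.NewSessionWithOptions(session.Options{
//		Profile:           "ci",
//		SharedConfigState: session.SharedConfigEnable,
//		SharedConfigFiles: []string{"/etc/myapp/aws_config"},
//	})
//	if err != nil {
//		// handle configuration errors at startup
//	}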
+// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + var err error + if opts.SharedConfigState == SharedConfigEnable { + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } + } else { + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS. +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID && len(endpoint) > 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } else if service == ec2MetadataServiceID { + opts = append(opts, func(o *endpoints.Options) { + o.EC2MetadataEndpointMode = mode + }) + } + return resolver.EndpointFor(service, region, opts...) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + + if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode) + } + + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) 
+ + s := &Session{ + Config: cfg, + Handlers: handlers, + options: Options{ + EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint, + }, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") + } + + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) + if err != nil { + return err + } + r.InjectHandlers(handlers) + + return nil +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were set. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + if err != nil { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read from the file. See
Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + options: opts, + } + + initHandlers(s) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } + } + + return s, nil +} + +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + +func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error { + // CA Bundle can be specified in both the environment variable and the shared config file. + var caBundleFilename = envCfg.CustomCABundle + if len(caBundleFilename) == 0 { + caBundleFilename = sharedCfg.CustomCABundle + } + + // Only use environment value if session option is not provided. + customTLSOptions := map[string]struct { + filename string + field *io.Reader + errCode string + }{ + "custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle}, + "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert}, + "custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert}, + } + for name, v := range customTLSOptions { + if len(v.filename) != 0 && *v.field == nil { + f, err := os.Open(v.filename) + if err != nil { + return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err) + } + defer f.Close() + *v.field = f + } + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil { + return err + } + } + + // Setup HTTP client TLS certificate and key for client TLS authentication. + if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil { + if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil { + return err + } + } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil { + // Do nothing if neither value is available. 
+ + } else { + return awserr.New(ErrCodeLoadClientTLSCert, + fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided", + opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil) + } + + return nil +} + +func getHTTPTransport(client *http.Client) (*http.Transport, error) { + var t *http.Transport + switch v := client.Transport.(type) { + case *http.Transport: + t = v + default: + if client.Transport != nil { + return nil, fmt.Errorf("unsupported transport, %T", client.Transport) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport`, a custom + // transport with the next closest default values is used instead. + t = getCustomTransport() + } + + return t, nil +} + +func loadCustomCABundle(client *http.Client, bundle io.Reader) error { + t, err := getHTTPTransport(client) + if err != nil { + return awserr.New(ErrCodeLoadCustomCABundle, + "unable to load custom CA bundle, HTTPClient's transport unsupported type", err) + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + client.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeLoadCustomCABundle, + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New(ErrCodeLoadCustomCABundle, + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error { + t, err := getHTTPTransport(client) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to get usable HTTP transport from client", err) + } + + cert, err := ioutil.ReadAll(certFile) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to read client TLS cert file", err) + } + + key, err := ioutil.ReadAll(keyFile) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to read client TLS key file", err) + } + + clientCert, err := tls.X509KeyPair(cert, key) + if err != nil { + return awserr.New(ErrCodeLoadClientTLSCert, + "unable to load x509 key pair from client cert", err) + } + + tlsCfg := t.TLSClientConfig + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + + tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert) + + t.TLSClientConfig = tlsCfg + client.Transport = t + + return nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + 
endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + var ec2IMDSEndpoint string + for _, v := range []string{ + sessOpts.EC2IMDSEndpoint, + envCfg.EC2IMDSEndpoint, + sharedCfg.EC2IMDSEndpoint, + } { + if len(v) != 0 { + ec2IMDSEndpoint = v + break + } + } + + var endpointMode endpoints.EC2IMDSEndpointModeState + for _, v := range []endpoints.EC2IMDSEndpointModeState{ + sessOpts.EC2IMDSEndpointMode, + envCfg.EC2IMDSEndpointMode, + sharedCfg.EC2IMDSEndpointMode, + } { + if v != endpoints.EC2IMDSEndpointModeStateUnset { + endpointMode = v + break + } + } + + if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) + } + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { + if v != endpoints.DualStackEndpointStateUnset { + cfg.UseDualStackEndpoint = v + break + } + } + + for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { + if v != endpoints.FIPSEndpointStateUnset { + cfg.UseFIPSEndpoint = v + break + } + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. 
Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + resolvedRegion := normalizeRegion(s.Config) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, + } +} + +const ec2MetadataServiceID = "ec2metadata" + +func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint + + opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint + + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + + opt.ResolvedRegion = resolvedRegion + + opt.Logger = cfg.Logger + opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
+ + resolvedRegion := normalizeRegion(s.Config) + + var resolved endpoints.ResolvedEndpoint + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = aws.StringValue(s.Config.Region) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, + } +} + +// logDeprecatedNewSessionError logs the session creation error and installs a +// handler that fails all subsequent requests with it. +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} + +// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided +// config to have the equivalent options for resolution and returns the resolved region name. +func normalizeRegion(cfg *aws.Config) (resolved string) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + region := aws.StringValue(cfg.Region) + + if strings.Contains(region, fipsInfix) || + strings.Contains(region, fipsPrefix) || + strings.Contains(region, fipsSuffix) { + resolved = strings.Replace(strings.Replace(strings.Replace( + region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1) + cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } + + return resolved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 00000000000..424c82b4d34 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,729 @@ +package session + +import ( + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // AWS Single Sign-On (AWS SSO) group + ssoAccountIDKey = "sso_account_id" + ssoRegionKey = "sso_region" + ssoRoleNameKey = "sso_role_name" + ssoStartURL = "sso_start_url" + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // custom CA Bundle filename + customCABundleKey = `ca_bundle` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process`
// optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" + + // EC2 IMDS Endpoint Mode + ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" + + // EC2 IMDS Endpoint + ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // Use FIPS Endpoint Resolution + useFIPSEndpointKey = "use_fips_endpoint" +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + Profile string + + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + SSOAccountID string + SSORegion string + SSORoleName string + SSOStartURL string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + AssumeRoleDuration *time.Duration + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. + // + // region + Region string + + // CustomCABundle is the file path to a PEM file the SDK will read and + // use to configure the HTTP transport with additional CA certs that are + // not present in the platforms default CA store. + // + // This value will be ignored if the file does not exist. + // + // ca_bundle + CustomCABundle string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // sts_regional_endpoints = regional + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. 
+ // + // s3_use_arn_region=true + S3UseARNRegion bool + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) + // + // ec2_metadata_service_endpoint_mode=IPv6 + EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode. + // + // ec2_metadata_service_endpoint=http://fd00:ec2::254 + EC2IMDSEndpoint string + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // use_dualstack_endpoint=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // use_fips_endpoint=true + UseFIPSEndpoint endpoints.FIPSEndpointState +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromIniFile for information on how the config files +// will be loaded. +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { + return sharedConfig{}, err + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + cfg.Profile = profile + + // Apply each file's values for the profile, skipping files that don't define it. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. + profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr + } + + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only has credential provider options. 
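// As a hedged illustration of the source_profile linking handled below (all
// names and values are hypothetical):
//
//	[profile app]
//	role_arn       = arn:aws:iam::123456789012:role/app
//	source_profile = base
//
//	[profile base]
//	aws_access_key_id     = AKIDEXAMPLE
//	aws_secret_access_key = SECRETEXAMPLE
//
// If "base" supplied no usable credentials, loading "app" would fail with
// SharedConfigAssumeRoleError.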
+ cfg.clearAssumeRoleOptions() + } else { + // The first time a profile is seen, it must either be assume role + // credentials or SSO. Assert if the credential type requires a role ARN, + // the ARN is also set, or validate that the SSO configuration is complete. + if err := cfg.validateCredentialsConfig(profile); err != nil { + return err + } + } + profiles[profile] = struct{}{} + + if err := cfg.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // A profile linked via source_profile ignores credential provider + // options; the source profile must provide the credentials. + cfg.clearCredentialOptions() + + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg + } + + return nil +} + +// setFromIniFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. +func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to the alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + updateString(&cfg.Region, section, regionKey) + updateString(&cfg.CustomCABundle, section, customCABundleKey) + + if section.Has(roleDurationSecondsKey) { + d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + cfg.AssumeRoleDuration = &d + } + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointSharedKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } + + // AWS Single Sign-On (AWS SSO) + updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) + updateString(&cfg.SSORegion, 
section, ssoRegionKey) + updateString(&cfg.SSORoleName, section, ssoRoleNameKey) + updateString(&cfg.SSOStartURL, section, ssoStartURL) + + if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + ec2MetadataServiceEndpointModeKey, file.Filename, err) + } + updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + + updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) + + updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + + return nil +} + +func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + return endpointMode.SetFromString(value) +} + +func (cfg *sharedConfig) validateCredentialsConfig(profile string) error { + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
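// As a hedged illustration (values are hypothetical), a profile that names
// more than one credential source at once, such as
//
//	[profile broken]
//	role_arn           = arn:aws:iam::123456789012:role/app
//	source_profile     = base
//	credential_process = /usr/local/bin/fetch-creds
//
// fails the check below with ErrSharedConfigSourceCollision.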
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) validateSSOConfiguration() error { + if !cfg.hasSSOConfiguration() { + return nil + } + + var missing []string + if len(cfg.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(cfg.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(cfg.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(cfg.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + cfg.Profile, strings.Join(missing, ", ")) + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.hasSSOConfiguration(): + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} + cfg.SSOAccountID = "" + cfg.SSORegion = "" + cfg.SSORoleName = "" + cfg.SSOStartURL = "" +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func (cfg *sharedConfig) hasSSOConfiguration() bool { + switch { + case len(cfg.SSOAccountID) != 0: + case len(cfg.SSORegion) != 0: + case len(cfg.SSORoleName) != 0: + case len(cfg.SSOStartURL) != 0: + default: + return false + } + return true +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section, if the +// key is present in the section. +func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBool will only update the dst with the value in the section, if the +// key is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + +// updateBoolPtr will only update the dst with the value in the section, if the +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + +// SharedConfigLoadError is an error for when the shared config file fails to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. 
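// A hedged usage sketch: callers typically branch on the awserr.Error code;
// the handling shown is illustrative only.
//
//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "SharedConfigLoadError" {
//		fmt.Println("shared config failed to load:", aerr.Message())
//	}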
+func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not found in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string + SourceProfile string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. +func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// updateUseDualStackEndpoint will only update the dst with the value in the section, if +// a valid key and corresponding DualStackEndpointState is found. +func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + if section.Bool(key) { + *dst = endpoints.DualStackEndpointStateEnabled + } else { + *dst = endpoints.DualStackEndpointStateDisabled + } + + return +} + +// updateUseFIPSEndpoint will only update the dst with the value in the section, if +// a valid key and corresponding FIPSEndpointState is found. 
+func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) { + if !section.Has(key) { + return + } + + if section.Bool(key) { + *dst = endpoints.FIPSEndpointStateEnabled + } else { + *dst = endpoints.FIPSEndpointStateDisabled + } + + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 00000000000..38ea61afeaa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception that the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) + if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 00000000000..2aec80661a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,298 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + if !value.IsValid() && tag.Get("type") != "structure" { + return nil + } + } + + buf.WriteByte('{') + defer buf.WriteString("}") + + if !value.IsValid() { + return nil + } + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. 
+ // Additionally, we cannot skip nil fields due to + // idempotency auto filling. + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+ enc := base64.NewEncoder(base64.StdEncoding, buf)
+ enc.Write(converted)
+ enc.Close()
+ }
+ buf.WriteByte('"')
+ }
+ case aws.JSONValue:
+ str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+ if err != nil {
+ return fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ buf.WriteString(str)
+ default:
+ return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+ }
+ }
+ return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+ buf.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ if s[i] == '"' {
+ buf.WriteString(`\"`)
+ } else if s[i] == '\\' {
+ buf.WriteString(`\\`)
+ } else if s[i] == '\b' {
+ buf.WriteString(`\b`)
+ } else if s[i] == '\f' {
+ buf.WriteString(`\f`)
+ } else if s[i] == '\r' {
+ buf.WriteString(`\r`)
+ } else if s[i] == '\t' {
+ buf.WriteString(`\t`)
+ } else if s[i] == '\n' {
+ buf.WriteString(`\n`)
+ } else if s[i] < 32 {
+ buf.WriteString("\\u00")
+ buf.WriteByte(hex[s[i]>>4])
+ buf.WriteByte(hex[s[i]&0xF])
+ } else {
+ buf.WriteByte(s[i])
+ }
+ }
+ buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 00000000000..8b2c9bbeba0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,304 @@
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var millisecondsFloat = new(big.Float).SetInt64(1e3)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
+// type. The value to unmarshal the json document into must be a pointer to the
+// type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := json.NewDecoder(body).Decode(v)
+ if err != nil {
+ msg := "failed decoding error message"
+ if err == io.EOF {
+ msg = "error message missing"
+ err = nil
+ }
+ return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the result into object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+ var out interface{}
+
+ decoder := json.NewDecoder(stream)
+ decoder.UseNumber()
+ err := decoder.Decode(&out)
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the
+// object v. Ignores casing for structure members.
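+//
+// A minimal usage sketch (illustrative only; the target struct here is
+// hypothetical, not part of this package):
+//
+//	type record struct {
+//		Name *string `locationName:"name" type:"string"`
+//	}
+//	var out record
+//	err := UnmarshalJSONCaseInsensitive(&out, strings.NewReader(`{"NAME":"x"}`))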
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
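+ // (This is a linear scan over every key; the match is aliased under
+ // the canonical name so the lookup below finds it. If several keys
+ // match case-insensitively, map iteration order decides which value
+ // wins.)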
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. 
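+ // (NoEscape: d already holds the raw JSON document; QuotedEscape is
+ // only used on the build side, where the document is embedded in a
+ // quoted JSON string.)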
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case json.Number: + switch value.Interface().(type) { + case *int64: + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) + value.Set(reflect.ValueOf(&di)) + case *float64: + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) + case *time.Time: + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000000..d9aa271148d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,87 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + // Always serialize the body, don't suppress it. + req.SetBufferBody(buf) + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name
+ req.HTTPRequest.Header.Add("X-Amz-Target", target)
+ }
+
+ // Only set the content type if one is not already specified and a
+ // JSONVersion is specified.
+ if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+ jsonVersion := req.ClientInfo.JSONVersion
+ req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+ }
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+ if req.DataFilled() {
+ err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ }
+ }
+ return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+ rest.UnmarshalMeta(req)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 00000000000..c0c52e2db0f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,107 @@
+package jsonrpc
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+ exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers.
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+ return &UnmarshalTypedError{
+ exceptions: exceptions,
+ }
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+ resp *http.Response,
+ respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+ var buf bytes.Buffer
+ var jsonErr jsonErrorResponse
+ teeReader := io.TeeReader(resp.Body, &buf)
+ err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+ if err != nil {
+ return nil, err
+ }
+ body := ioutil.NopCloser(&buf)
+
+ // Code may be separated by hash(#), with the last element being the code
+ // used by the SDK.
+ codeParts := strings.SplitN(jsonErr.Code, "#", 2)
+ code := codeParts[len(codeParts)-1]
+ msg := jsonErr.Message
+
+ if fn, ok := u.exceptions[code]; ok {
+ // If the exception code is known, use the associated constructor to get
+ // a value for the exception that the JSON body can be unmarshaled into.
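+ // (Case-insensitive unmarshaling is used here because services do
+ // not agree on the casing of error payload members, e.g. "Message"
+ // versus "message".)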
+ v := fn(respMeta) + err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) + if err != nil { + return nil, err + } + + return v, nil + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 00000000000..2e0e205af37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { + r.HTTPRequest.Header.Set("Content-Type", "application/json") + } + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. 
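+//
+// A hedged wiring sketch of how generated clients typically attach this
+// package's handlers (svc stands in for a generated client value; the snippet
+// is illustrative, not part of this file):
+//
+//	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+//	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+//	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)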
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
new file mode 100644
index 00000000000..d756d8cc529
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
@@ -0,0 +1,134 @@
+package restjson
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ errorTypeHeader = "X-Amzn-Errortype"
+ errorMessageHeader = "X-Amzn-Errormessage"
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+ exceptions map[string]func(protocol.ResponseMetadata) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers.
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+ return &UnmarshalTypedError{
+ exceptions: exceptions,
+ }
+}
+
+// UnmarshalError attempts to unmarshal the HTTP response error as a known
+// error type. If unable to unmarshal the error type, the generic SDK error
+// type will be used.
+func (u *UnmarshalTypedError) UnmarshalError(
+ resp *http.Response,
+ respMeta protocol.ResponseMetadata,
+) (error, error) {
+
+ code := resp.Header.Get(errorTypeHeader)
+ msg := resp.Header.Get(errorMessageHeader)
+
+ body := resp.Body
+ if len(code) == 0 {
+ // If the code is unavailable in the HTTP headers, the JSON message has
+ // to be parsed to determine what kind of exception this is.
+ var buf bytes.Buffer
+ var jsonErr jsonErrorResponse
+ teeReader := io.TeeReader(resp.Body, &buf)
+ err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
+ if err != nil {
+ return nil, err
+ }
+
+ body = ioutil.NopCloser(&buf)
+ code = jsonErr.Code
+ msg = jsonErr.Message
+ }
+
+ // If the code has colon separators, remove them so it can be compared
+ // against modeled exception names.
+ code = strings.SplitN(code, ":", 2)[0]
+
+ if fn, ok := u.exceptions[code]; ok {
+ // If the exception code is known, use the associated constructor to get
+ // a value for the exception that the JSON body can be unmarshaled into.
+ v := fn(respMeta)
+ if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil {
+ return nil, err
+ }
+
+ if err := rest.UnmarshalResponse(resp, v, true); err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ }
+
+ // fallback to unmodeled generic exceptions
+ return awserr.NewRequestFailure(
+ awserr.New(code, msg, nil),
+ respMeta.StatusCode,
+ respMeta.RequestID,
+ ), nil
+}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
+// protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{
+ Name: "awssdk.restjson.UnmarshalError",
+ Fn: UnmarshalError,
+}
+
+// UnmarshalError unmarshals a response error for the REST JSON protocol.
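+//
+// The X-Amzn-Errortype and X-Amzn-Errormessage headers take precedence; the
+// JSON body is only consulted for whichever of the code and message the
+// headers do not supply.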
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + code := r.HTTPResponse.Header.Get(errorTypeHeader) + if code == "" { + code = jsonErr.Code + } + msg := r.HTTPResponse.Header.Get(errorMessageHeader) + if msg == "" { + msg = jsonErr.Message + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, jsonErr.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 00000000000..948f060cab8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1354 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. 
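+//
+// A usage sketch (illustrative only; svc is an *sso.SSO client, and the token
+// and identifiers are placeholders):
+//
+//	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
+//		AccessToken: aws.String("<token from sso-oidc CreateToken>"),
+//		AccountId:   aws.String("123456789012"),
+//		RoleName:    aws.String("ReadOnlyAccess"),
+//	})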
+// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. +// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the AWS SSO User Guide. This operation returns a paginated response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the LogoutRequest method. 
+// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the client- and server-side session that is associated with the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// * UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// * TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. +type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetRoleCredentialsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. + // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
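+//
+// The setters return the receiver, so inputs can be built fluently (sketch;
+// token is a placeholder variable):
+//
+//	input := new(sso.GetRoleCredentialsInput).
+//		SetAccessToken(token).
+//		SetAccountId("123456789012").
+//		SetRoleName("ReadOnlyAccess")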
+func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. + RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. +func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. 
For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountRolesInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The number of items that clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { + s.NextToken = &v + return s +} + +type ListAccountRolesOutput struct { + _ struct{} `type:"structure"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []*RoleInfo `locationName:"roleList" type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the AWS SSO OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
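+//
+// NextToken also supports manual paging when the Pages helpers are not used
+// (sketch; token is a placeholder and error handling is elided):
+//
+//	in := &sso.ListAccountsInput{AccessToken: aws.String(token)}
+//	for {
+//		out, err := svc.ListAccounts(in)
+//		if err != nil || out.NextToken == nil {
+//			break
+//		}
+//		in.NextToken = out.NextToken
+//	}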
+func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput {
+	s.NextToken = &v
+	return s
+}
+
+type ListAccountsOutput struct {
+	_ struct{} `type:"structure"`
+
+	// A paginated response with the list of account information and the next token
+	// if more results are available.
+	AccountList []*AccountInfo `locationName:"accountList" type:"list"`
+
+	// The page token that a client uses to retrieve the next page of accounts.
+	NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListAccountsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListAccountsOutput) GoString() string {
+	return s.String()
+}
+
+// SetAccountList sets the AccountList field's value.
+func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput {
+	s.AccountList = v
+	return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput {
+	s.NextToken = &v
+	return s
+}
+
+type LogoutInput struct {
+	_ struct{} `type:"structure" nopayload:"true"`
+
+	// The token issued by the CreateToken API call. For more information, see CreateToken
+	// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
+	// in the AWS SSO OIDC API Reference Guide.
+	//
+	// AccessToken is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by LogoutInput's
+	// String and GoString methods.
+	//
+	// AccessToken is a required field
+	AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LogoutInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LogoutInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LogoutInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LogoutInput"}
+	if s.AccessToken == nil {
+		invalidParams.Add(request.NewErrParamRequired("AccessToken"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *LogoutInput) SetAccessToken(v string) *LogoutInput {
+	s.AccessToken = &v
+	return s
+}
+
+type LogoutOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LogoutOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LogoutOutput) GoString() string {
+	return s.String()
+}
+
+// The specified resource doesn't exist.
+type ResourceNotFoundException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ResourceNotFoundException) GoString() string {
+	return s.String()
+}
+
+func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
+	return &ResourceNotFoundException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *ResourceNotFoundException) Code() string {
+	return "ResourceNotFoundException"
+}
+
+// Message returns the exception's message.
+func (s *ResourceNotFoundException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *ResourceNotFoundException) OrigErr() error {
+	return nil
+}
+
+func (s *ResourceNotFoundException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *ResourceNotFoundException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *ResourceNotFoundException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Provides information about the role credentials that are assigned to the
+// user.
+type RoleCredentials struct {
+	_ struct{} `type:"structure"`
+
+	// The identifier used for the temporary security credentials. For more information,
+	// see Using Temporary Security Credentials to Request Access to AWS Resources
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+	// in the AWS IAM User Guide.
+	AccessKeyId *string `locationName:"accessKeyId" type:"string"`
+
+	// The date on which temporary security credentials expire.
+	Expiration *int64 `locationName:"expiration" type:"long"`
+
+	// The key that is used to sign the request. For more information, see Using
+	// Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
+	// in the AWS IAM User Guide.
+	//
+	// SecretAccessKey is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by RoleCredentials's
+	// String and GoString methods.
+ SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. + RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. 
+type TooManyRequestsException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TooManyRequestsException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TooManyRequestsException) GoString() string {
+	return s.String()
+}
+
+func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error {
+	return &TooManyRequestsException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *TooManyRequestsException) Code() string {
+	return "TooManyRequestsException"
+}
+
+// Message returns the exception's message.
+func (s *TooManyRequestsException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *TooManyRequestsException) OrigErr() error {
+	return nil
+}
+
+func (s *TooManyRequestsException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *TooManyRequestsException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *TooManyRequestsException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+type UnauthorizedException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnauthorizedException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnauthorizedException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnauthorizedException(v protocol.ResponseMetadata) error {
+	return &UnauthorizedException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnauthorizedException) Code() string {
+	return "UnauthorizedException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedException) OrigErr() error {
+	return nil
+}
+
+func (s *UnauthorizedException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnauthorizedException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for the request.
+func (s *UnauthorizedException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
new file mode 100644
index 00000000000..92d82b2afb6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sso provides the client and types for making API
+// requests to AWS Single Sign-On.
+//
+// AWS Single Sign-On Portal is a web service that makes it easy for you to
+// assign user access to AWS SSO resources such as the user portal. Users can
+// get AWS account applications and roles assigned to them and get federated
+// into the application.
+//
+// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the AWS SSO User Guide.
+//
+// This API reference guide describes the AWS SSO Portal operations that you
+// can call programmatically and includes detailed information on data types
+// and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to AWS SSO and other
+// AWS services. For more information about the AWS SDKs, including how to download
+// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// Using the Client
+//
+// To contact AWS Single Sign-On with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 00000000000..77a6792e352
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+	// ErrCodeInvalidRequestException for service response error code
+	// "InvalidRequestException".
+	//
+	// Indicates that a problem occurred with the input to the request. For example,
+	// a required parameter might be missing or out of range.
+	ErrCodeInvalidRequestException = "InvalidRequestException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// The specified resource doesn't exist.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeTooManyRequestsException for service response error code
+	// "TooManyRequestsException".
+	//
+	// Indicates that the request is being made too frequently and is more than
+	// what the server can handle.
+	ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+	// ErrCodeUnauthorizedException for service response error code
+	// "UnauthorizedException".
+	//
+	// Indicates that the request is not authorized. This can happen due to an invalid
+	// access token in the request.
+	ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"InvalidRequestException":   newErrorInvalidRequestException,
+	"ResourceNotFoundException": newErrorResourceNotFoundException,
+	"TooManyRequestsException":  newErrorTooManyRequestsException,
+	"UnauthorizedException":     newErrorUnauthorizedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 00000000000..7a28dc797e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,105 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type SSO struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO"        // Name of service.
+	EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+	ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create a SSO client from just a session.
+//     svc := sso.New(mySession)
+//
+//     // Create a SSO client with additional configuration
+//     svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "awsssoportal"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO {
+	svc := &SSO{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2019-06-10",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(
+		protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+	)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a SSO operation and runs any
+// custom request initialization.
+func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
new file mode 100644
index 00000000000..4cac247c188
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
@@ -0,0 +1,86 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package ssoiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sso"
+)
+
+// SSOAPI provides an interface to enable mocking the
+// sso.SSO service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is to stub out the SDK's service client's
+// calls when unit testing your code, without needing to inject custom request
+// handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Single Sign-On.
+//    func myFunc(svc ssoiface.SSOAPI) bool {
+//        // Make svc.GetRoleCredentials request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sso.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSSOClient struct {
+//        ssoiface.SSOAPI
+//    }
+//    func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSSOClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or using
+// tooling to generate mocks to satisfy the interfaces.
+type SSOAPI interface {
+	GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
+	GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
+	GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
+
+	ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
+	ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
+	ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
+
+	ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
+	ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
+
+	ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
+	ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
+	ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
+
+	ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
+	ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
+
+	Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
+	LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
+	LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
+}
+
+var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 00000000000..1e7fa65577e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,3437 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the AssumeRoleRequest method.
+//    req, resp := client.AssumeRoleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRole,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleInput{}
+	}
+
+	output = &AssumeRoleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials that you can use to access
+// Amazon Web Services resources that you might not normally have access to.
+// These temporary credentials consist of an access key ID, a secret access
+// key, and a security token. Typically, you use AssumeRole within your account
+// or for cross-account access. For a comparison of AssumeRole with other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception:
+// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plaintext that you use for both inline
+// and managed session policies can't exceed 2,048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// Amazon Web Services API calls to access resources in the account that owns
+// the role. You cannot use session policies to grant more permissions than
+// those allowed by the identity-based policy of the role that is being assumed.
+// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// When you create a role, you create two policies: A role trust policy that
+// specifies who can assume the role and a permissions policy that specifies
+// what can be done with the role. You specify the trusted principal who is
+// allowed to assume the role in the role trust policy.
+//
+// To assume a role from a different account, your Amazon Web Services account
+// must be trusted by the role. The trust relationship is defined in the role's
+// trust policy when the role is created. That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the user account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN
+// of the role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of
+// the following:
+//
+//    * Attach a policy to the user that allows the user to call AssumeRole
+//    (as long as the role's trust policy trusts the account).
+//
+//    * Add the user as a principal directly in the role's trust policy.
+//
+// You can do either because the role’s trust policy acts as an IAM resource-based
+// policy. When a resource-based policy grants access to a principal in the
+// same account, no additional identity-based policy is required. For more information
+// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see Passing
+// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during
+// role chaining. For more information, see Chaining Roles with Session Tags
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an Amazon
+// Web Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+//    "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
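+//
+// As a sketch only (svc and params stand in for an existing STS client and
+// an assumed AssumeRole input), such a typed error check could look like:
+//
+//    out, err := svc.AssumeRole(params)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            // Inspect the service error code and message.
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    } else {
+//        fmt.Println(out.Credentials)
+//    }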
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the total packed size of the session policies
+//   and session tags combined was too large. An Amazon Web Services conversion
+//   compresses the session policy document, session policy ARNs, and session
+//   tags into a packed binary format that has a separate limit. The error message
+//   indicates by percentage how close the policies and tags are to the upper
+//   size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+//   in the IAM User Guide.
+//
+//   You could receive this error even though you meet other defined session policy
+//   and session tag limits. For more information, see IAM and STS Entity Character
+//   Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+//   in the IAM User Guide.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//   (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+//   * ErrCodeExpiredTokenException "ExpiredTokenException"
+//   The web identity token that was passed is expired or is not valid. Get a
+//   new identity token from the identity provider and then retry the request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the AssumeRoleWithSAMLRequest method.
+//    req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithSAML,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithSAMLInput{}
+	}
+
+	output = &AssumeRoleWithSAMLOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based Amazon Web
+// Services access without user-specific credentials or configuration. For a
+// comparison of AssumeRoleWithSAML with the other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to Amazon Web
+// Services services.
+//
+// Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
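+//
+// As a sketch (svc is an assumed existing *sts.STS client, the ARNs are
+// placeholders, and base64Assertion is assumed to hold the base64-encoded
+// SAML assertion), requesting a two-hour session could look like:
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+//        SAMLAssertion:   aws.String(base64Assertion),
+//        DurationSeconds: aws.Int64(7200),
+//    })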
+// +// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume +// a role using role chaining and provide a DurationSeconds parameter value +// greater than one hour, the operation fails. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys +// in the metadata document that is uploaded for the SAML provider entity for +// your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. +// The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. 
+// +// An Amazon Web Services conversion compresses the passed session policies +// and session tags into a packed binary format that has a separate limit. Your +// request can fail for this limit even if your plaintext meets the other requirements. +// The PackedPolicySize response element indicates by percentage how close the +// policies and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. +// Additionally, you must use Identity and Access Management (IAM) to create +// a SAML provider entity in your Amazon Web Services account that represents +// your identity provider. You must also create an IAM role that specifies this +// SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. 
For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+//   in the IAM User Guide.
+//
+//   You could receive this error even though you meet other defined session policy
+//   and session tag limits. For more information, see IAM and STS Entity Character
+//   Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+//   in the IAM User Guide.
+//
+//   * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+//   The identity provider (IdP) reported that authentication failed. This might
+//   be because the claim is invalid.
+//
+//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
+//   can also mean that the claim has expired or has been explicitly revoked.
+//
+//   * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+//   The web identity token that was passed could not be validated by Amazon Web
+//   Services. Get a new identity token from the identity provider and then retry
+//   the request.
+//
+//   * ErrCodeExpiredTokenException "ExpiredTokenException"
+//   The web identity token that was passed is expired or is not valid. Get a
+//   new identity token from the identity provider and then retry the request.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//   (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+//    req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider. Example providers
+// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
+// Connect-compatible identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the Amazon Web Services SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the Amazon Web Services SDK for Android (http://aws.amazon.com/sdkforandroid/)
+// to uniquely identify a user. You can also supply the user with a consistent
+// identity throughout the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the Amazon Web Services SDK for Android Developer Guide and Amazon Cognito
+// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the Amazon Web Services SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application
+// (for example, on mobile devices) that requests temporary security credentials
+// without including long-term Amazon Web Services credentials in the application.
+// You also don't need to deploy server-based proxy services that use long-term
+// Amazon Web Services credentials. Instead, the identity of the caller is validated
+// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service
+// API operations.
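+//
+// As a sketch (svc is an assumed existing *sts.STS client, the role ARN is a
+// placeholder, and idToken is assumed to hold the token obtained from the web
+// identity provider), such an exchange could look like:
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        RoleSessionName:  aws.String("app-session"),
+//        WebIdentityToken: aws.String(idToken),
+//    })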
+// +// Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// Amazon Web Services API calls to access resources in the account that owns +// the role. You cannot use session policies to grant more permissions than +// those allowed by the identity-based policy of the role that is being assumed. +// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed session policies +// and session tags into a packed binary format that has a separate limit. Your +// request can fail for this limit even if your plaintext meets the other requirements. 
+// The PackedPolicySize response element indicates by percentage how close the +// policies and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. +// +// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
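+//
+// // Editor's illustrative sketch (not part of the generated documentation):
+// // a minimal AssumeRoleWithWebIdentity call. The role ARN, session name,
+// // and token placeholder below are hypothetical values.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo-role"), // hypothetical
+//        RoleSessionName:  aws.String("app1"),
+//        WebIdentityToken: aws.String("<token issued by your IdP>"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials) // temporary access key, secret key, session token
+//    }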
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the DecodeAuthorizationMessageRequest method.
+//    req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+	op := &request.Operation{
+		Name:       opDecodeAuthorizationMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DecodeAuthorizationMessageInput{}
+	}
+
+	output = &DecodeAuthorizationMessageOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an Amazon Web Services request.
+//
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some Amazon Web Services operations additionally
+// return an encoded message that can provide details about this authorization
+// failure.
+//
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether
+// that operation returns an encoded message in addition to returning an HTTP
+// code.
+//
+// The message is encoded because the details of the authorization status can
+// contain privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage)
+// action.
+//
+// The decoded message includes the following type of information:
+//
+//    * Whether the request was denied due to an explicit deny or due to the
+//    absence of an explicit allow. For more information, see Determining Whether
+//    a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    in the IAM User Guide.
+//
+//    * The principal who made the request.
+//
+//    * The requested action.
+//
+//    * The requested resource.
+//
+//    * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+//   The error returned if the message passed to DecodeAuthorizationMessage was
+//   invalid. This can happen if the token contains invalid characters, such as
+//   line breaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetAccessKeyInfoRequest method.
+//    req, resp := client.GetAccessKeyInfoRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+	op := &request.Operation{
+		Name:       opGetAccessKeyInfo,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAccessKeyInfoInput{}
+	}
+
+	output = &GetAccessKeyInfoOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs
+// to you, you can sign in as the root user and review your root user access
+// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted access key might return an error
+// that the key doesn't exist.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
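+//
+// // Editor's sketch (assumed usage, not generated documentation): bounding
+// // the call with a timeout via the context-aware variant. The access key ID
+// // below is the documentation example value.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//    out, err := svc.GetAccessKeyInfoWithContext(ctx, &sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Account) // the owning account ID
+//    }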
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetCallerIdentityRequest method.
+//    req, resp := client.GetCallerIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// adds a policy to your IAM user or role that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when an IAM user
+// or role is denied access. To view an example response, see I Am Not Authorized
+// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
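+//
+// // Editor's sketch (not generated documentation): a quick "who am I" check;
+// // as noted above, no permissions are required for this call.
+//
+//    out, err := svc.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(*out.Account, *out.Arn, *out.UserId)
+//    }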
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetFederationTokenRequest method.
+//    req, resp := client.GetFederationTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// You can also call GetFederationToken using the security credentials of an +// Amazon Web Services account root user, but we do not recommend it. Instead, +// we recommend that you create an IAM user for the purpose of the proxy application. +// Then attach a policy to the IAM user that limits federated users to only +// the actions and resources that they need to access. For more information, +// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained +// by using the Amazon Web Services account root user credentials have a maximum +// duration of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service except the following: +// +// * You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. 
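+//
+// // Editor's illustrative sketch (hypothetical user name and tag values):
+// // attaching session tags to the federated session via GetFederationTokenInput.
+//
+//    input := &sts.GetFederationTokenInput{
+//        Name: aws.String("federated-user"), // hypothetical federated user name
+//        Tags: []*sts.Tag{
+//            {Key: aws.String("Department"), Value: aws.String("Engineering")},
+//        },
+//    }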
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you
+// to pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This
+// means that you cannot have separate Department and department tag keys. Assume
+// that the user that you are federating has the Department=Marketing tag and
+// you pass the department=engineering session tag. Department and department
+// are not saved as separate tags, and the session tag passed in the request
+// takes precedence over the user tag.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the total packed size of the session policies
+//   and session tags combined was too large. An Amazon Web Services conversion
+//   compresses the session policy document, session policy ARNs, and session
+//   tags into a packed binary format that has a separate limit. The error message
+//   indicates by percentage how close the policies and tags are to the upper
+//   size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+//   in the IAM User Guide.
+//
+//   You could receive this error even though you meet other defined session policy
+//   and session tag limits. For more information, see IAM and STS Entity Character
+//   Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+//   in the IAM User Guide.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//   (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
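+//
+// // Editor's sketch (assumed values, not generated documentation): vending
+// // scoped credentials and checking for the packed-size error described above.
+//
+//    out, err := svc.GetFederationToken(input) // input as sketched in the Tags section
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sts.ErrCodePackedPolicyTooLargeException {
+//        // trim the session policies or tags and retry
+//    }
+//    if err == nil {
+//        fmt.Println(out.Credentials) // federated user credentials
+//    }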
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetSessionTokenRequest method.
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	output = &GetSessionTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an Amazon Web Services account
+// or IAM user. The credentials consist of an access key ID, a secret access
+// key, and a security token. Typically, you use GetSessionToken if you want
+// to use MFA to protect programmatic calls to specific Amazon Web Services
+// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would
+// need to call GetSessionToken and submit an MFA code that is associated with
+// their MFA device. Using the temporary security credentials that are returned
+// from the call, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. If you do not supply a correct MFA code,
+// then the API returns an access denied error.
For a comparison of GetSessionToken +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Session Duration +// +// The GetSessionToken operation must be called by using the long-term Amazon +// Web Services security credentials of the Amazon Web Services account root +// user or an IAM user. Credentials that are created by IAM users are valid +// for the duration that you specify. This duration can range from 900 seconds +// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default +// of 43,200 seconds (12 hours). Credentials based on account credentials can +// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a +// default of 1 hour. +// +// Permissions +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any Amazon Web Services service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with Amazon Web Services +// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with Amazon Web Services. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using Amazon Web Services account root user +// credentials, the temporary credentials have root user permissions. Similarly, +// if GetSessionToken is called using the credentials of an IAM user, the temporary +// credentials have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
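+//
+// // Editor's sketch (hypothetical serial number and code): requesting
+// // MFA-protected session credentials for an IAM user.
+//
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//        DurationSeconds: aws.Int64(3600),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // hypothetical
+//        TokenCode:       aws.String("123456"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials) // MFA-authenticated session credentials
+//    }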
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for + // the role. The maximum session duration setting can have a value from 1 hour + // to 12 hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services + // API role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of + // up to 43200 seconds (12 hours), depending on the maximum session duration + // setting for your role. However, if you assume a role using role chaining + // and provide a DurationSeconds parameter value greater than one hour, the + // operation fails. To learn how to view the maximum value for your role, see + // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. 
A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your Amazon Web + // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. 
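+	//
+	// As an editor's illustrative sketch (hypothetical policy ARN), a managed
+	// session policy can be supplied like this:
+	//
+	//    PolicyArns: []*sts.PolicyDescriptorType{
+	//        {Arn: aws.String("arn:aws:iam::123456789012:policy/scoped-access")},
+	//    },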
+ // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can
+	// also include underscores or any of the following characters: =,.@-. You cannot
+	// use a value that begins with the text aws:. This prefix is reserved for Amazon
+	// Web Services internal use.
+	SourceIdentity *string `min:"2" type:"string"`
+
+	// A list of session tags that you want to pass. Each session tag consists of
+	// a key name and an associated value. For more information about session tags,
+	// see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+	// in the IAM User Guide.
+	//
+	// This parameter is optional. You can pass up to 50 session tags. The plaintext
+	// session tag keys can’t exceed 128 characters, and the values can’t exceed
+	// 256 characters. For these and additional limits, see IAM and STS Character
+	// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
+	// in the IAM User Guide.
+	//
+	// An Amazon Web Services conversion compresses the passed session policies
+	// and session tags into a packed binary format that has a separate limit. Your
+	// request can fail for this limit even if your plaintext meets the other requirements.
+	// The PackedPolicySize response element indicates by percentage how close the
+	// policies and tags for your request are to the upper size limit.
+	//
+	// You can pass a session tag with the same key as a tag that is already attached
+	// to the role. When you do, session tags override a role tag with the same
+	// key.
+	//
+	// Tag key–value pairs are not case sensitive, but case is preserved. This
+	// means that you cannot have separate Department and department tag keys. Assume
+	// that the role has the Department=Marketing tag and you pass the department=engineering
+	// session tag. Department and department are not saved as separate tags, and
+	// the session tag passed in the request takes precedence over the role tag.
+	//
+	// Additionally, if you used temporary credentials to perform this operation,
+	// the new session inherits any transitive session tags from the calling session.
+	// If you pass a session tag with the same key as an inherited tag, the operation
+	// fails. To view the inherited tags for a session, see the CloudTrail logs.
+	// For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs)
+	// in the IAM User Guide.
+	Tags []*Tag `type:"list"`
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA. (In other words, if the policy includes a condition
+	// that tests for MFA). If the role being assumed requires MFA and if the TokenCode
+	// value is missing or expired, the AssumeRole call returns an "access denied"
+	// error.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string `min:"6" type:"string"`
+
+	// A list of keys for session tags that you want to set as transitive. If you
+	// set a tag key as transitive, the corresponding key and value pass to subsequent
+	// sessions in a role chain. For more information, see Chaining Roles with Session
+	// Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+	// in the IAM User Guide.
+	//
+	// This parameter is optional.
When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { + invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. 
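+// Like the other AssumeRoleInput setters, SetPolicyArns returns the receiver,
+// so calls can be chained. For example (editor's illustrative sketch with
+// hypothetical values):
+//
+//    input := (&sts.AssumeRoleInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/demo-role").
+//        SetRoleSessionName("app1").
+//        SetDurationSeconds(3600)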
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { + s.SourceIdentity = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { + s.SourceIdentity = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. 
Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. 
+ // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. 
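+//
+// Illustrative use of this builder style (a sketch, not generated SDK text;
+// the ARNs and assertion are placeholders, error handling is elided, and this
+// operation does not require signing credentials):
+//
+//	svc := sts.New(session.Must(session.NewSession()))
+//	out, err := svc.AssumeRoleWithSAML(new(sts.AssumeRoleWithSAMLInput).
+//		SetPrincipalArn("arn:aws:iam::123456789012:saml-provider/MySAMLIdP").
+//		SetRoleArn("arn:aws:iam::123456789012:role/MySAMLRole").
+//		SetSAMLAssertion(base64Assertion))
+//	// handle err, then use out.Credentials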
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the following: + // + // * The Issuer response value. + // + // * The Amazon Web Services account ID. + // + // * The friendly name (the last part of the ARN) of the SAML provider in + // IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify + // a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity + // when calling AssumeRoleWithSAML. You do this by adding an attribute to the + // SAML assertion. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The value of the NameID element in the Subject element of the SAML assertion. 
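+	//
+	// As an illustrative aside (not generated SDK text), the NameQualifier
+	// pseudocode above maps to this Go sketch using crypto/sha1 and
+	// encoding/base64; together with this Subject value it identifies a
+	// federated user. The issuer, account ID, and provider name are
+	// placeholders:
+	//
+	//	sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
+	//	nameQualifier := base64.StdEncoding.EncodeToString(sum[:])
+	//	_ = nameQualifier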
+ Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { + s.SourceIdentity = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. 
To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. 
+ // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. 
The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute associated + // with your users, like user name or email, as the source identity when calling + // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. 
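+	//
+	// Illustrative call that yields this value (a sketch, not generated SDK
+	// text; the ARN and token are placeholders, error handling is elided, and
+	// this operation does not require signing credentials):
+	//
+	//	svc := sts.New(session.Must(session.NewSession()))
+	//	out, _ := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+	//		RoleArn:          aws.String("arn:aws:iam::123456789012:role/WebIdentityRole"),
+	//		RoleSessionName:  aws.String("app-session"),
+	//		WebIdentityToken: aws.String(oidcIDToken),
+	//	})
+	//	_ = out.SubjectFromWebIdentityToken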
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { + s.SourceIdentity = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by Amazon Web Services + // when the role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
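+//
+// Illustrative use (a sketch, not generated SDK text; the encoded message is
+// a placeholder, error handling is elided, and the caller must be allowed the
+// sts:DecodeAuthorizationMessage action):
+//
+//	svc := sts.New(session.Must(session.NewSession()))
+//	out, _ := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//		EncodedMessage: aws.String(encodedMsg),
+//	})
+//	fmt.Println(aws.StringValue(out.DecodedMessage))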
+func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // The API returns a response with the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the Amazon Web Services account. + Account *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
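+//
+// Illustrative use, the Go equivalent of `aws sts get-caller-identity` (a
+// sketch, not generated SDK text; error handling is elided):
+//
+//	svc := sts.New(session.Must(session.NewSession()))
+//	out, _ := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//	fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn))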
+func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string `type:"string"` + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using Amazon Web Services account root user credentials are restricted to + // a maximum of 3,600 seconds (one hour). If the specified duration is longer + // than one hour, the session obtained by using root user credentials defaults + // to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plaintext that you use for both inline + // and managed session policies can't exceed 2,048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // This parameter is optional. 
However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed session policies + // and session tags into a packed binary format that has a separate limit. Your + // request can fail for this limit even if your plaintext meets the other requirements. + // The PackedPolicySize response element indicates by percentage how close the + // policies and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
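+//
+// Illustrative use (a sketch, not generated SDK text; the name, policy ARN,
+// and tag are placeholders and error handling is elided; note that omitting
+// session policies leaves the federated session with no permissions, per the
+// field docs above):
+//
+//	svc := sts.New(session.Must(session.NewSession()))
+//	out, _ := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//		Name: aws.String("Bob"),
+//		PolicyArns: []*sts.PolicyDescriptorType{
+//			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
+//		},
+//		Tags: []*sts.Tag{{Key: aws.String("Department"), Value: aws.String("Engineering")}},
+//	})
+//	_ = out.Credentials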
+func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. 
The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for Amazon Web Services account owners are restricted to a maximum of 3,600 + // seconds (one hour). If the duration is longer than one hour, the session + // for Amazon Web Services account owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. 
+type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000000..2d98d92353a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,32 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for Identity and Access Management (IAM) users or for users that +// you authenticate (federated users). This guide provides descriptions of the +// STS API. For more information about using this service, see Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 00000000000..b680bbd5d70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,84 @@ +// Code generated by private/model/cli/gen-api/main.go. 
DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by Amazon Web + // Services. Get a new identity token from the identity provider and then retry + // the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An Amazon Web Services conversion + // compresses the session policy document, session policy ARNs, and session + // tags into a packed binary format that has a separate limit. The error message + // indicates by percentage how close the policies and tags are to the upper + // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+	ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+	// ErrCodeRegionDisabledException for service response error code
+	// "RegionDisabledException".
+	//
+	// STS is not activated in the requested region for the account that is being
+	// asked to generate credentials. The account administrator must use the IAM
+	// console to activate STS in that region. For more information, see Activating
+	// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// in the IAM User Guide.
+	ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000000..703defd969d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,99 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     mySession := session.Must(session.NewSession())
+//
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2011-06-15",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 00000000000..e2e1d6efe55
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is to stub out the SDK's service client
+// calls for unit testing your code with the SDK, without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+// It's suggested to use the pattern above for testing, or to use tooling to
+// generate mocks to satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
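To tie the generated pieces above together: a minimal usage sketch, not part of the vendored SDK. The region, duration, and `main` harness are assumptions for illustration; the calls themselves (`New`, the generated setters, `Validate`, `GetSessionToken`) are the ones defined in this package.

```go
// Illustrative only: a minimal caller of the vendored STS client.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Create the client from a session, as in the New documentation above.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := sts.New(sess)

	// Build the input with the generated setters, then run the same
	// checks the request pipeline applies before sending.
	input := (&sts.GetSessionTokenInput{}).SetDurationSeconds(3600)
	if err := input.Validate(); err != nil {
		panic(err) // e.g. a DurationSeconds below the 900-second minimum
	}

	out, err := svc.GetSessionToken(input)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Credentials)
}
```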
diff --git a/vendor/github.com/jpillora/backoff/LICENSE b/vendor/github.com/jpillora/backoff/LICENSE
new file mode 100644
index 00000000000..1cc708081b3
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Jaime Pillora
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jpillora/backoff/README.md b/vendor/github.com/jpillora/backoff/README.md
new file mode 100644
index 00000000000..ee4d6230afe
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/README.md
@@ -0,0 +1,119 @@
+# Backoff
+
+A simple exponential backoff counter in Go (Golang)
+
+[![GoDoc](https://godoc.org/github.com/jpillora/backoff?status.svg)](https://godoc.org/github.com/jpillora/backoff) [![Circle CI](https://circleci.com/gh/jpillora/backoff.svg?style=shield)](https://circleci.com/gh/jpillora/backoff)
+
+### Install
+
+```
+$ go get -v github.com/jpillora/backoff
+```
+
+### Usage
+
+Backoff is a `time.Duration` counter. It starts at `Min`. After every call to `Duration()` it is multiplied by `Factor`. It is capped at `Max`. It returns to `Min` on every call to `Reset()`. `Jitter` adds randomness ([see below](#example-using-jitter)). Used in conjunction with the `time` package.
+
+---
+
+#### Simple example
+
+``` go
+
+b := &backoff.Backoff{
+	//These are the defaults
+	Min:    100 * time.Millisecond,
+	Max:    10 * time.Second,
+	Factor: 2,
+	Jitter: false,
+}
+
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+
+fmt.Printf("Reset!\n")
+b.Reset()
+
+fmt.Printf("%s\n", b.Duration())
+```
+
+```
+100ms
+200ms
+400ms
+Reset!
+100ms
+```
+
+---
+
+#### Example using `net` package
+
+``` go
+b := &backoff.Backoff{
+	Max: 5 * time.Minute,
+}
+
+for {
+	conn, err := net.Dial("tcp", "example.com:5309")
+	if err != nil {
+		d := b.Duration()
+		fmt.Printf("%s, reconnecting in %s", err, d)
+		time.Sleep(d)
+		continue
+	}
+	//connected
+	b.Reset()
+	conn.Write([]byte("hello world!"))
+	// ... Read ... Write ... etc
+	conn.Close()
+	//disconnected
+}
+
+```
+
+---
+
+#### Example using `Jitter`
+
+Enabling `Jitter` adds some randomization to the backoff durations. [See Amazon's writeup of performance gains using jitter](http://www.awsarchitectureblog.com/2015/03/backoff.html). Seeding is not necessary but doing so gives repeatable results.
+
+```go
+import "math/rand"
+
+b := &backoff.Backoff{
+	Jitter: true,
+}
+
+rand.Seed(42)
+
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+
+fmt.Printf("Reset!\n")
+b.Reset()
+
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+```
+
+```
+100ms
+106.600049ms
+281.228155ms
+Reset!
+100ms
+104.381845ms
+214.957989ms
+```
+
+#### Documentation
+
+https://godoc.org/github.com/jpillora/backoff
+
+#### Credits
+
+Forked from [some JavaScript](https://github.com/segmentio/backo) written by [@tj](https://github.com/tj)
diff --git a/vendor/github.com/jpillora/backoff/backoff.go b/vendor/github.com/jpillora/backoff/backoff.go
new file mode 100644
index 00000000000..d113e68906b
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/backoff.go
@@ -0,0 +1,100 @@
+// Package backoff provides an exponential-backoff implementation.
+package backoff
+
+import (
+	"math"
+	"math/rand"
+	"sync/atomic"
+	"time"
+)
+
+// Backoff is a time.Duration counter, starting at Min. After every call to
+// the Duration method the current timing is multiplied by Factor, but it
+// never exceeds Max.
+//
+// Backoff is not generally concurrent-safe, but the ForAttempt method can
+// be used concurrently.
+type Backoff struct {
+	attempt uint64
+	// Factor is the multiplying factor for each increment step
+	Factor float64
+	// Jitter eases contention by randomizing backoff steps
+	Jitter bool
+	// Min and Max are the minimum and maximum values of the counter
+	Min, Max time.Duration
+}
+
+// Duration returns the duration for the current attempt before incrementing
+// the attempt counter. See ForAttempt.
+func (b *Backoff) Duration() time.Duration {
+	d := b.ForAttempt(float64(atomic.AddUint64(&b.attempt, 1) - 1))
+	return d
+}
+
+const maxInt64 = float64(math.MaxInt64 - 512)
+
+// ForAttempt returns the duration for a specific attempt. This is useful if
+// you have a large number of independent Backoffs, but don't want to use
+// unnecessary memory storing the Backoff parameters per Backoff. The first
+// attempt should be 0.
+//
+// ForAttempt is concurrent-safe.
+func (b *Backoff) ForAttempt(attempt float64) time.Duration {
+	// Zero-values are nonsensical, so we use
+	// them to apply defaults
+	min := b.Min
+	if min <= 0 {
+		min = 100 * time.Millisecond
+	}
+	max := b.Max
+	if max <= 0 {
+		max = 10 * time.Second
+	}
+	if min >= max {
+		// short-circuit
+		return max
+	}
+	factor := b.Factor
+	if factor <= 0 {
+		factor = 2
+	}
+	// calculate this duration
+	minf := float64(min)
+	durf := minf * math.Pow(factor, attempt)
+	if b.Jitter {
+		durf = rand.Float64()*(durf-minf) + minf
+	}
+	// ensure float64 won't overflow int64
+	if durf > maxInt64 {
+		return max
+	}
+	dur := time.Duration(durf)
+	// keep within bounds
+	if dur < min {
+		return min
+	}
+	if dur > max {
+		return max
+	}
+	return dur
+}
+
+// Reset restarts the current attempt counter at zero.
+func (b *Backoff) Reset() {
+	atomic.StoreUint64(&b.attempt, 0)
+}
+
+// Attempt returns the current attempt counter value.
+func (b *Backoff) Attempt() float64 {
+	return float64(atomic.LoadUint64(&b.attempt))
+}
+
+// Copy returns a backoff with equal constraints as the original
+func (b *Backoff) Copy() *Backoff {
+	return &Backoff{
+		Factor: b.Factor,
+		Jitter: b.Jitter,
+		Min:    b.Min,
+		Max:    b.Max,
+	}
+}
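The README above only exercises `Duration()`. Here is a small sketch of the `ForAttempt` entry point (parameter values assumed for illustration), which keeps the attempt counter at the caller so one `Backoff` value can serve many independent operations concurrently:

```go
// A short sketch of Backoff.ForAttempt: the caller tracks the attempt
// number, so a single shared Backoff holds only the parameters.
package main

import (
	"fmt"
	"time"

	"github.com/jpillora/backoff"
)

func main() {
	b := &backoff.Backoff{
		Min:    100 * time.Millisecond,
		Max:    10 * time.Second,
		Factor: 2,
	}

	// Attempts start at 0: prints 100ms, 200ms, 400ms, 800ms.
	for attempt := 0; attempt < 4; attempt++ {
		fmt.Println(b.ForAttempt(float64(attempt)))
	}
}
```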
diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
new file mode 100644
index 00000000000..042091d9b3b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
new file mode 100644
index 00000000000..52ccb5a934d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Amazon.com, Inc
+Damian Gryski
+Eric Buth
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Klaus Post
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 00000000000..ea6524ddd02
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,41 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Alex Legg
+Damian Gryski
+Eric Buth
+Jan Mercl <0xjnml@gmail.com>
+Jonathan Swinney
+Kai Backman
+Klaus Post
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE
new file mode 100644
index 00000000000..6050c10f4c8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README.md b/vendor/github.com/klauspost/compress/snappy/README.md new file mode 100644 index 00000000000..7abf6dfc92c --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README.md @@ -0,0 +1,17 @@ +# snappy + +The Snappy compression format in the Go programming language. + +This is a plug-in replacement for `github.com/golang/snappy`. + +It provides full replacement of the Snappy package. + +See [Snappy Compatibility](https://github.com/klauspost/compress/tree/master/s2#snappy-compatibility) in the S2 documentation. + +"Better" compression mode is used. For buffered streams concurrent compression is used. + +For more options use the [s2 package](https://pkg.go.dev/github.com/klauspost/compress/s2). + +# usage + +Replace imports `github.com/golang/snappy` with `github.com/klauspost/compress/snappy`. diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 00000000000..89f1fa23444 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,60 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = s2.ErrCorrupt + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = s2.ErrTooLarge + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = s2.ErrUnsupported +) + +const ( + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + return s2.DecodedLen(src) +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. 
+func Decode(dst, src []byte) ([]byte, error) { + return s2.Decode(dst, src) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize)) +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader = s2.Reader diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000000..e8bd72c1864 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + return s2.EncodeSnappyBetter(dst, src) +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + return s2.MaxEncodedLen(srcLen) +} + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression(), s2.WriterFlushOnWrite(), s2.WriterConcurrency(1)) +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression()) +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer = s2.Writer diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000000..398cdc95a01 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,46 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. 
It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. 
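As a worked example consistent with the tag rules above (an illustration, not part of the upstream comment): the 7-byte block 0x05 0x10 'h' 'e' 'l' 'l' 'o' decodes to "hello". The leading varint 0x05 gives a decoded length of 5; the tag byte 0x10 has l == 0 (a literal tag) and m == 0x10>>2 == 4, and since m < 60 the next 1 + m == 5 bytes are the literal "hello".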
+*/ diff --git a/vendor/github.com/mwitkow/go-conntrack/.gitignore b/vendor/github.com/mwitkow/go-conntrack/.gitignore new file mode 100644 index 00000000000..406e49369a4 --- /dev/null +++ b/vendor/github.com/mwitkow/go-conntrack/.gitignore @@ -0,0 +1,163 @@ +# Created by .ignore support plugin (hsz.mobi) +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea +.idea/workspace.xml +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties +### Go template +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + diff --git a/vendor/github.com/mwitkow/go-conntrack/.travis.yml b/vendor/github.com/mwitkow/go-conntrack/.travis.yml new file mode 100644 index 00000000000..a9654fa05a3 --- /dev/null +++ b/vendor/github.com/mwitkow/go-conntrack/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: +- "1.8" +- "1.9" +- "1.10" +- "1.11" +- "1.12" + +install: +- go get github.com/stretchr/testify +- go get github.com/prometheus/client_golang/prometheus +- go get golang.org/x/net/context +- go get golang.org/x/net/trace + +script: +- go test -v ./... 
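Before moving on to the go-conntrack dependency, a short aside on the snappy shim vendored above: a minimal sketch (the sample data and `main` harness are assumptions, not upstream code) of its two sets of entry points, `Encode`/`Decode` for the block format and the buffered writer plus reader for the stream format:

```go
// Minimal sketch of the vendored snappy shim's block and stream formats.
// Note that Close is required on the buffered writer to flush the frame.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/snappy"
)

func main() {
	data := bytes.Repeat([]byte("hello "), 50)

	// Block format: one-shot encode and decode.
	block := snappy.Encode(nil, data)
	roundTrip, err := snappy.Decode(nil, block)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(roundTrip, data)) // true

	// Stream (framing) format.
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write(data); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	out, err := io.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, data)) // true
}
```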
diff --git a/vendor/github.com/mwitkow/go-conntrack/LICENSE b/vendor/github.com/mwitkow/go-conntrack/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/mwitkow/go-conntrack/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/mwitkow/go-conntrack/README.md b/vendor/github.com/mwitkow/go-conntrack/README.md
new file mode 100644
index 00000000000..5ae77028448
--- /dev/null
+++ b/vendor/github.com/mwitkow/go-conntrack/README.md
@@ -0,0 +1,88 @@
+# Go tracing and monitoring (Prometheus) for `net.Conn`
+
+[![Travis Build](https://travis-ci.org/mwitkow/go-conntrack.svg)](https://travis-ci.org/mwitkow/go-conntrack)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mwitkow/go-conntrack)](http://goreportcard.com/report/mwitkow/go-conntrack)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/mwitkow/go-conntrack)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring and [`x/net/trace`](https://godoc.org/golang.org/x/net/trace#EventLog) tracing wrappers for `net.Conn`, both inbound (`net.Listener`) and outbound (`net.Dialer`).
+
+## Why?
+
+The Go standard library does a great job of doing "the right" things with your connections: `http.Transport` pools outbound ones, and `http.Server` sets good *Keep Alive* defaults.
+However, it is still easy to get it wrong, see the excellent [*The complete guide to Go net/http timeouts*](https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/).
+
+That's why you should be able to monitor (using Prometheus) how many connections your Go frontend servers have inbound, and how big the connection pools to your backends are. You should also be able to inspect your connections without `ssh` and `netstat`.
+
+![Events page with connections](https://raw.githubusercontent.com/mwitkow/go-conntrack/images/events.png)
+
+## How to use?
+
+All of these examples can be found in [`example/server.go`](example/server.go):
+
+### Conntrack Dialer for HTTP DefaultClient
+
+Most often people use the default `http.DefaultClient` that uses `http.DefaultTransport`. The easiest way to make sure all your outbound connections are monitored and traced is:
+
+```go
+http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(
+    conntrack.DialWithTracing(),
+    conntrack.DialWithDialer(&net.Dialer{
+        Timeout:   30 * time.Second,
+        KeepAlive: 30 * time.Second,
+    }),
+)
+```
+
+#### Dialer Name
+
+Tracked outbound connections are organised by *dialer name* (with `default` being the default). The *dialer name* is used for monitoring (`dialer_name` label) and tracing (`net.ClientConn.` family).
+
+You can pass `conntrack.WithDialerName()` to `NewDialContextFunc` to set the name for the dialer. Moreover, you can set the *dialer name* per invocation of the dialer, by passing it in the `Context`. For example using the [`ctxhttp`](https://godoc.org/golang.org/x/net/context/ctxhttp) lib:
+
+```go
+callCtx := conntrack.DialNameToContext(parentCtx, "google")
+ctxhttp.Get(callCtx, http.DefaultClient, "https://www.google.com")
+```
+
+### Conntrack Listener for HTTP Server
+
+Tracked inbound connections are organised by *listener name* (with `default` being the default).
The *listener name* is used for monitoring (`listener_name` label) and tracing (`net.ServerConn.` family). For example, a simple `http.Server` can be instrumented like this: + +```go +listener, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) +listener = conntrack.NewListener(listener, + conntrack.TrackWithName("http"), + conntrack.TrackWithTracing(), + conntrack.TrackWithTcpKeepAlive(5 * time.Minute)) +httpServer.Serve(listener) +``` + +Note the `TrackWithTcpKeepAlive` option. The default `http.ListenAndServe` adds a TCP keep-alive wrapper to inbound TCP connections. `conntrack.NewListener` allows you to do that without another layer of wrapping. + +#### TLS server example + +The standard library `http.ListenAndServeTLS` does a lot to bootstrap TLS connections, including supporting HTTP2 negotiation. Unfortunately, that is hard to do if you want to provide your own `net.Listener`. That's why this repo comes with the `connhelpers` package, which takes care of configuring `tls.Config` for that use case. Here's an example of use: + +```go +listener, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) +listener = conntrack.NewListener(listener, + conntrack.TrackWithName("https"), + conntrack.TrackWithTracing(), + conntrack.TrackWithTcpKeepAlive(5 * time.Minute)) +tlsConfig, err := connhelpers.TlsConfigForServerCerts(*tlsCertFilePath, *tlsKeyFilePath) +tlsConfig, err = connhelpers.TlsConfigWithHttp2Enabled(tlsConfig) +tlsListener := tls.NewListener(listener, tlsConfig) +httpServer.Serve(tlsListener) +``` + +# Status + +This code is used by Improbable's HTTP frontending and proxying stack for debugging and monitoring of established user connections. + +Additional tooling will be added if needed, and contributions are welcome. + +# License + +`go-conntrack` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. + diff --git a/vendor/github.com/mwitkow/go-conntrack/dialer_reporter.go b/vendor/github.com/mwitkow/go-conntrack/dialer_reporter.go new file mode 100644 index 00000000000..0e39886b577 --- /dev/null +++ b/vendor/github.com/mwitkow/go-conntrack/dialer_reporter.go @@ -0,0 +1,108 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms.
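+ +// dialer_reporter.go registers four Prometheus counters for outbound connections (attempted, established, failed, closed), all labelled by dialer_name; dial failures are further broken down by reason (resolution, refused, timeout, unknown).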
+ +package conntrack + +import ( + "context" + "net" + "os" + "syscall" + + prom "github.com/prometheus/client_golang/prometheus" +) + +type failureReason string + +const ( + failedResolution = "resolution" + failedConnRefused = "refused" + failedTimeout = "timeout" + failedUnknown = "unknown" +) + +var ( + dialerAttemptedTotal = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "net", + Subsystem: "conntrack", + Name: "dialer_conn_attempted_total", + Help: "Total number of connections attempted by the dialer of a given name.", + }, []string{"dialer_name"}) + + dialerConnEstablishedTotal = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "net", + Subsystem: "conntrack", + Name: "dialer_conn_established_total", + Help: "Total number of connections successfully established by the dialer of a given name.", + }, []string{"dialer_name"}) + + dialerConnFailedTotal = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "net", + Subsystem: "conntrack", + Name: "dialer_conn_failed_total", + Help: "Total number of connections that failed to dial by the dialer of a given name.", + }, []string{"dialer_name", "reason"}) + + dialerConnClosedTotal = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "net", + Subsystem: "conntrack", + Name: "dialer_conn_closed_total", + Help: "Total number of connections closed which originated from the dialer of a given name.", + }, []string{"dialer_name"}) +) + +func init() { + prom.MustRegister(dialerAttemptedTotal) + prom.MustRegister(dialerConnEstablishedTotal) + prom.MustRegister(dialerConnFailedTotal) + prom.MustRegister(dialerConnClosedTotal) +} + +// PreRegisterDialerMetrics pre-populates Prometheus labels for the given dialer name, to avoid the Prometheus missing-labels issue. +func PreRegisterDialerMetrics(dialerName string) { + dialerAttemptedTotal.WithLabelValues(dialerName) + dialerConnEstablishedTotal.WithLabelValues(dialerName) + for _, reason := range []failureReason{failedTimeout, failedResolution, failedConnRefused, failedUnknown} { + dialerConnFailedTotal.WithLabelValues(dialerName, string(reason)) + } + dialerConnClosedTotal.WithLabelValues(dialerName) +} + +func reportDialerConnAttempt(dialerName string) { + dialerAttemptedTotal.WithLabelValues(dialerName).Inc() +} + +func reportDialerConnEstablished(dialerName string) { + dialerConnEstablishedTotal.WithLabelValues(dialerName).Inc() +} + +func reportDialerConnClosed(dialerName string) { + dialerConnClosedTotal.WithLabelValues(dialerName).Inc() +} + +func reportDialerConnFailed(dialerName string, err error) { + if netErr, ok := err.(*net.OpError); ok { + switch nestErr := netErr.Err.(type) { + case *net.DNSError: + dialerConnFailedTotal.WithLabelValues(dialerName, string(failedResolution)).Inc() + return + case *os.SyscallError: + if nestErr.Err == syscall.ECONNREFUSED { + dialerConnFailedTotal.WithLabelValues(dialerName, string(failedConnRefused)).Inc() + return + } + dialerConnFailedTotal.WithLabelValues(dialerName, string(failedUnknown)).Inc() + return + } + if netErr.Timeout() { + dialerConnFailedTotal.WithLabelValues(dialerName, string(failedTimeout)).Inc() + return + } + } else if err == context.Canceled || err == context.DeadlineExceeded { + dialerConnFailedTotal.WithLabelValues(dialerName, string(failedTimeout)).Inc() + return + } + dialerConnFailedTotal.WithLabelValues(dialerName, string(failedUnknown)).Inc() +} diff --git a/vendor/github.com/mwitkow/go-conntrack/dialer_wrapper.go b/vendor/github.com/mwitkow/go-conntrack/dialer_wrapper.go new file mode 100644 index 00000000000..cebaf967662 --- /dev/null +++
b/vendor/github.com/mwitkow/go-conntrack/dialer_wrapper.go @@ -0,0 +1,166 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package conntrack + +import ( + "context" + "fmt" + "net" + "sync" + + "golang.org/x/net/trace" +) + +var ( + dialerNameKey = "conntrackDialerKey" +) + +type dialerOpts struct { + name string + monitoring bool + tracing bool + parentDialContextFunc dialerContextFunc +} + +type dialerOpt func(*dialerOpts) + +type dialerContextFunc func(context.Context, string, string) (net.Conn, error) + +// DialWithName sets the name of the dialer for tracking and monitoring. +// This is the name for the dialer (default is `default`), but for `NewDialContextFunc` it can be overwritten from the +// Context using `DialNameToContext`. +func DialWithName(name string) dialerOpt { + return func(opts *dialerOpts) { + opts.name = name + } +} + +// DialWithoutMonitoring turns *off* Prometheus monitoring for this dialer. +func DialWithoutMonitoring() dialerOpt { + return func(opts *dialerOpts) { + opts.monitoring = false + } +} + +// DialWithTracing turns *on* the /debug/events tracing of the dial calls. +func DialWithTracing() dialerOpt { + return func(opts *dialerOpts) { + opts.tracing = true + } +} + +// DialWithDialer allows you to override the `net.Dialer` instance used to actually conduct the dials. +func DialWithDialer(parentDialer *net.Dialer) dialerOpt { + return DialWithDialContextFunc(parentDialer.DialContext) +} + +// DialWithDialContextFunc allows you to override the func that gets used for the actual dialing. The default is `net.Dialer.DialContext`. +func DialWithDialContextFunc(parentDialerFunc dialerContextFunc) dialerOpt { + return func(opts *dialerOpts) { + opts.parentDialContextFunc = parentDialerFunc + } +} + +// DialNameFromContext returns the name of the dialer from the context of the DialContext func, if any. +func DialNameFromContext(ctx context.Context) string { + val, ok := ctx.Value(dialerNameKey).(string) + if !ok { + return "" + } + return val +} + +// DialNameToContext returns a context that will contain a dialer name override. +func DialNameToContext(ctx context.Context, dialerName string) context.Context { + return context.WithValue(ctx, dialerNameKey, dialerName) +} + +// NewDialContextFunc returns a `DialContext` function that tracks outbound connections. +// The signature is compatible with `http.Transport.DialContext` and is meant to be used there. +func NewDialContextFunc(optFuncs ...dialerOpt) func(context.Context, string, string) (net.Conn, error) { + opts := &dialerOpts{name: defaultName, monitoring: true, parentDialContextFunc: (&net.Dialer{}).DialContext} + for _, f := range optFuncs { + f(opts) + } + if opts.monitoring { + PreRegisterDialerMetrics(opts.name) + } + return func(ctx context.Context, network string, addr string) (net.Conn, error) { + name := opts.name + if ctxName := DialNameFromContext(ctx); ctxName != "" { + name = ctxName + } + return dialClientConnTracker(ctx, network, addr, name, opts) + } +} + +// NewDialFunc returns a `Dial` function that tracks outbound connections. +// The signature is compatible with `http.Transport.Dial` and is meant to be used there for Go < 1.7. +func NewDialFunc(optFuncs ...dialerOpt) func(string, string) (net.Conn, error) { + dialContextFunc := NewDialContextFunc(optFuncs...)
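+ // Adapt the context-aware dial function to the legacy Dial signature by supplying context.TODO().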
+ return func(network string, addr string) (net.Conn, error) { + return dialContextFunc(context.TODO(), network, addr) + } +} + +type clientConnTracker struct { + net.Conn + opts *dialerOpts + dialerName string + event trace.EventLog + mu sync.Mutex +} + +func dialClientConnTracker(ctx context.Context, network string, addr string, dialerName string, opts *dialerOpts) (net.Conn, error) { + var event trace.EventLog + if opts.tracing { + event = trace.NewEventLog(fmt.Sprintf("net.ClientConn.%s", dialerName), fmt.Sprintf("%v", addr)) + } + if opts.monitoring { + reportDialerConnAttempt(dialerName) + } + conn, err := opts.parentDialContextFunc(ctx, network, addr) + if err != nil { + if event != nil { + event.Errorf("failed dialing: %v", err) + event.Finish() + } + if opts.monitoring { + reportDialerConnFailed(dialerName, err) + } + return nil, err + } + if event != nil { + event.Printf("established: %s -> %s", conn.LocalAddr(), conn.RemoteAddr()) + } + if opts.monitoring { + reportDialerConnEstablished(dialerName) + } + tracker := &clientConnTracker{ + Conn: conn, + opts: opts, + dialerName: dialerName, + event: event, + } + return tracker, nil +} + +func (ct *clientConnTracker) Close() error { + err := ct.Conn.Close() + ct.mu.Lock() + if ct.event != nil { + if err != nil { + ct.event.Errorf("failed closing: %v", err) + } else { + ct.event.Printf("closing") + } + ct.event.Finish() + ct.event = nil + } + ct.mu.Unlock() + if ct.opts.monitoring { + reportDialerConnClosed(ct.dialerName) + } + return err +} diff --git a/vendor/github.com/mwitkow/go-conntrack/listener_reporter.go b/vendor/github.com/mwitkow/go-conntrack/listener_reporter.go new file mode 100644 index 00000000000..21a8f5557cf --- /dev/null +++ b/vendor/github.com/mwitkow/go-conntrack/listener_reporter.go @@ -0,0 +1,43 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package conntrack + +import prom "github.com/prometheus/client_golang/prometheus" + +var ( + listenerAcceptedTotal = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "net", + Subsystem: "conntrack", + Name: "listener_conn_accepted_total", + Help: "Total number of connections opened to the listener of a given name.", + }, []string{"listener_name"}) + + listenerClosedTotal = prom.NewCounterVec( + prom.CounterOpts{ + Namespace: "net", + Subsystem: "conntrack", + Name: "listener_conn_closed_total", + Help: "Total number of connections closed that were made to the listener of a given name.", + }, []string{"listener_name"}) +) + +func init() { + prom.MustRegister(listenerAcceptedTotal) + prom.MustRegister(listenerClosedTotal) +} + +// preRegisterListenerMetrics pre-populates Prometheus labels for the given listener name, to avoid the Prometheus missing-labels issue. +func preRegisterListenerMetrics(listenerName string) { + listenerAcceptedTotal.WithLabelValues(listenerName) + listenerClosedTotal.WithLabelValues(listenerName) +} + +func reportListenerConnAccepted(listenerName string) { + listenerAcceptedTotal.WithLabelValues(listenerName).Inc() +} + +func reportListenerConnClosed(listenerName string) { + listenerClosedTotal.WithLabelValues(listenerName).Inc() +} diff --git a/vendor/github.com/mwitkow/go-conntrack/listener_wrapper.go b/vendor/github.com/mwitkow/go-conntrack/listener_wrapper.go new file mode 100644 index 00000000000..702fe25577a --- /dev/null +++ b/vendor/github.com/mwitkow/go-conntrack/listener_wrapper.go @@ -0,0 +1,158 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms.
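+ +// listener_wrapper.go wraps a net.Listener so that every accepted connection is reported to the counters above, with optional TCP keep-alive, retries of temporary Accept errors, and /debug/events tracing.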
+ +package conntrack + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/jpillora/backoff" + "golang.org/x/net/trace" +) + +const ( + defaultName = "default" +) + +type listenerOpts struct { + name string + monitoring bool + tracing bool + tcpKeepAlive time.Duration + retryBackoff *backoff.Backoff +} + +type listenerOpt func(*listenerOpts) + +// TrackWithName sets the name of the Listener for use in tracking and monitoring. +func TrackWithName(name string) listenerOpt { + return func(opts *listenerOpts) { + opts.name = name + } +} + +// TrackWithoutMonitoring turns *off* Prometheus monitoring for this listener. +func TrackWithoutMonitoring() listenerOpt { + return func(opts *listenerOpts) { + opts.monitoring = false + } +} + +// TrackWithTracing turns *on* the /debug/events tracing of the live listener connections. +func TrackWithTracing() listenerOpt { + return func(opts *listenerOpts) { + opts.tracing = true + } +} + +// TrackWithRetries enables retrying of temporary Accept() errors, with the given backoff between attempts. +// Concurrent accept calls that receive temporary errors have independent backoff scaling. +func TrackWithRetries(b backoff.Backoff) listenerOpt { + return func(opts *listenerOpts) { + opts.retryBackoff = &b + } +} + +// TrackWithTcpKeepAlive makes sure that any `net.TCPConn` that gets accepted has a keep-alive. +// This is useful for HTTP servers so that, for example, idle laptops don't use up resources on the +// server while they aren't utilising their connection. +// A value of 0 disables it. +func TrackWithTcpKeepAlive(keepalive time.Duration) listenerOpt { + return func(opts *listenerOpts) { + opts.tcpKeepAlive = keepalive + } +} + +type connTrackListener struct { + net.Listener + opts *listenerOpts +} + +// NewListener returns the given listener wrapped in a connection-tracking listener. +func NewListener(inner net.Listener, optFuncs ...listenerOpt) net.Listener { + opts := &listenerOpts{ + name: defaultName, + monitoring: true, + tracing: false, + } + for _, f := range optFuncs { + f(opts) + } + if opts.monitoring { + preRegisterListenerMetrics(opts.name) + } + return &connTrackListener{ + Listener: inner, + opts: opts, + } +} + +func (ct *connTrackListener) Accept() (net.Conn, error) { + // TODO(mwitkow): Add monitoring of failed accept.
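+ // Accept in a loop: when a retry backoff is configured, temporary errors (e.g. file-descriptor exhaustion) are retried after a delay that grows with the attempt number; other errors break out immediately.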
+ var ( + conn net.Conn + err error + ) + for attempt := 0; ; attempt++ { + conn, err = ct.Listener.Accept() + if err == nil || ct.opts.retryBackoff == nil { + break + } + if t, ok := err.(interface{ Temporary() bool }); !ok || !t.Temporary() { + break + } + time.Sleep(ct.opts.retryBackoff.ForAttempt(float64(attempt))) + } + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok && ct.opts.tcpKeepAlive > 0 { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(ct.opts.tcpKeepAlive) + } + return newServerConnTracker(conn, ct.opts), nil +} + +type serverConnTracker struct { + net.Conn + opts *listenerOpts + event trace.EventLog + mu sync.Mutex +} + +func newServerConnTracker(inner net.Conn, opts *listenerOpts) net.Conn { + tracker := &serverConnTracker{ + Conn: inner, + opts: opts, + } + if opts.tracing { + tracker.event = trace.NewEventLog(fmt.Sprintf("net.ServerConn.%s", opts.name), fmt.Sprintf("%v", inner.RemoteAddr())) + tracker.event.Printf("accepted: %v -> %v", inner.RemoteAddr(), inner.LocalAddr()) + } + if opts.monitoring { + reportListenerConnAccepted(opts.name) + } + return tracker +} + +func (ct *serverConnTracker) Close() error { + err := ct.Conn.Close() + ct.mu.Lock() + if ct.event != nil { + if err != nil { + ct.event.Errorf("failed closing: %v", err) + } else { + ct.event.Printf("closing") + } + ct.event.Finish() + ct.event = nil + } + ct.mu.Unlock() + if ct.opts.monitoring { + reportListenerConnClosed(ct.opts.name) + } + return err +} diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go new file mode 100644 index 00000000000..fffda4a7ef4 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/config.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package no longer handles safe yaml parsing. In order to +// ensure correct yaml unmarshalling, use "yaml.UnmarshalStrict()". + +package config + +import ( + "encoding/json" + "path/filepath" +) + +const secretToken = "<secret>" + +// Secret is a special type for storing secrets. +type Secret string + +// MarshalYAML implements the yaml.Marshaler interface for Secrets. +func (s Secret) MarshalYAML() (interface{}, error) { + if s != "" { + return secretToken, nil + } + return nil, nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets. +func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain Secret + return unmarshal((*plain)(s)) +} + +// MarshalJSON implements the json.Marshaler interface for Secret. +func (s Secret) MarshalJSON() ([]byte, error) { + if len(s) == 0 { + return json.Marshal("") + } + return json.Marshal(secretToken) +} + +// DirectorySetter is a config type that contains file paths that may +// be relative to the file containing the config. +type DirectorySetter interface { + // SetDirectory joins any relative file paths with dir. + // Any paths that are empty or absolute remain unchanged.
+ SetDirectory(dir string) +} + +// JoinDir joins dir and path if path is relative. +// If path is empty or absolute, it is returned unchanged. +func JoinDir(dir, path string) string { + if path == "" || filepath.IsAbs(path) { + return path + } + return filepath.Join(dir, path) +} diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go new file mode 100644 index 00000000000..4b872417105 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -0,0 +1,855 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package config + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/mwitkow/go-conntrack" + "golang.org/x/net/http2" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + "gopkg.in/yaml.v2" +) + +// DefaultHTTPClientConfig is the default HTTP client configuration. +var DefaultHTTPClientConfig = HTTPClientConfig{ + FollowRedirects: true, +} + +// defaultHTTPClientOptions holds the default HTTP client options. +var defaultHTTPClientOptions = httpClientOptions{ + keepAlivesEnabled: true, + http2Enabled: true, + // 5 minutes is typically above the maximum sane scrape interval. So we can + // use keepalive for all configurations. + idleConnTimeout: 5 * time.Minute, +} + +type closeIdler interface { + CloseIdleConnections() +} + +// BasicAuth contains basic HTTP authentication credentials. +type BasicAuth struct { + Username string `yaml:"username" json:"username"` + Password Secret `yaml:"password,omitempty" json:"password,omitempty"` + PasswordFile string `yaml:"password_file,omitempty" json:"password_file,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (a *BasicAuth) SetDirectory(dir string) { + if a == nil { + return + } + a.PasswordFile = JoinDir(dir, a.PasswordFile) +} + +// Authorization contains HTTP authorization credentials. +type Authorization struct { + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Credentials Secret `yaml:"credentials,omitempty" json:"credentials,omitempty"` + CredentialsFile string `yaml:"credentials_file,omitempty" json:"credentials_file,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (a *Authorization) SetDirectory(dir string) { + if a == nil { + return + } + a.CredentialsFile = JoinDir(dir, a.CredentialsFile) +} + +// URL is a custom URL type that allows validation at configuration load time. +type URL struct { + *url.URL +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface for URLs. 
+func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + + urlp, err := url.Parse(s) + if err != nil { + return err + } + u.URL = urlp + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface for URLs. +func (u URL) MarshalYAML() (interface{}, error) { + if u.URL != nil { + return u.Redacted(), nil + } + return nil, nil +} + +// Redacted returns the URL but replaces any password with "xxxxx". +func (u URL) Redacted() string { + if u.URL == nil { + return "" + } + + ru := *u.URL + if _, ok := ru.User.Password(); ok { + // We can not use secretToken because it would be escaped. + ru.User = url.UserPassword(ru.User.Username(), "xxxxx") + } + return ru.String() +} + +// UnmarshalJSON implements the json.Unmarshaler interface for URL. +func (u *URL) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + urlp, err := url.Parse(s) + if err != nil { + return err + } + u.URL = urlp + return nil +} + +// MarshalJSON implements the json.Marshaler interface for URL. +func (u URL) MarshalJSON() ([]byte, error) { + if u.URL != nil { + return json.Marshal(u.URL.String()) + } + return []byte("null"), nil +} + +// OAuth2 is the oauth2 client configuration. +type OAuth2 struct { + ClientID string `yaml:"client_id" json:"client_id"` + ClientSecret Secret `yaml:"client_secret" json:"client_secret"` + ClientSecretFile string `yaml:"client_secret_file" json:"client_secret_file"` + Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"` + TokenURL string `yaml:"token_url" json:"token_url"` + EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` + + // TLSConfig is used to connect to the token URL. + TLSConfig TLSConfig `yaml:"tls_config,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (a *OAuth2) SetDirectory(dir string) { + if a == nil { + return + } + a.ClientSecretFile = JoinDir(dir, a.ClientSecretFile) + a.TLSConfig.SetDirectory(dir) +} + +// HTTPClientConfig configures an HTTP client. +type HTTPClientConfig struct { + // The HTTP basic authentication credentials for the targets. + BasicAuth *BasicAuth `yaml:"basic_auth,omitempty" json:"basic_auth,omitempty"` + // The HTTP authorization credentials for the targets. + Authorization *Authorization `yaml:"authorization,omitempty" json:"authorization,omitempty"` + // The OAuth2 client credentials used to fetch a token for the targets. + OAuth2 *OAuth2 `yaml:"oauth2,omitempty" json:"oauth2,omitempty"` + // The bearer token for the targets. Deprecated in favour of + // Authorization.Credentials. + BearerToken Secret `yaml:"bearer_token,omitempty" json:"bearer_token,omitempty"` + // The bearer token file for the targets. Deprecated in favour of + // Authorization.CredentialsFile. + BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"` + // HTTP proxy server to use to connect to the targets. + ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` + // TLSConfig to use to connect to the targets. + TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` + // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + // The omitempty flag is not set, because it would be hidden from the + // marshalled configuration when set to false.
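+ // (FollowRedirects defaults to true via DefaultHTTPClientConfig.)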
+ FollowRedirects bool `yaml:"follow_redirects" json:"follow_redirects"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *HTTPClientConfig) SetDirectory(dir string) { + if c == nil { + return + } + c.TLSConfig.SetDirectory(dir) + c.BasicAuth.SetDirectory(dir) + c.Authorization.SetDirectory(dir) + c.OAuth2.SetDirectory(dir) + c.BearerTokenFile = JoinDir(dir, c.BearerTokenFile) +} + +// Validate validates the HTTPClientConfig to check that only one of BearerToken, +// BasicAuth and BearerTokenFile is configured. +func (c *HTTPClientConfig) Validate() error { + // Backwards compatibility with the bearer_token field. + if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { + return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured") + } + if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { + return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") + } + if c.BasicAuth != nil && (string(c.BasicAuth.Password) != "" && c.BasicAuth.PasswordFile != "") { + return fmt.Errorf("at most one of basic_auth password & password_file must be configured") + } + if c.Authorization != nil { + if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 { + return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file") + } + if string(c.Authorization.Credentials) != "" && c.Authorization.CredentialsFile != "" { + return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured") + } + c.Authorization.Type = strings.TrimSpace(c.Authorization.Type) + if len(c.Authorization.Type) == 0 { + c.Authorization.Type = "Bearer" + } + if strings.ToLower(c.Authorization.Type) == "basic" { + return fmt.Errorf(`authorization type cannot be set to "basic", use "basic_auth" instead`) + } + if c.BasicAuth != nil || c.OAuth2 != nil { + return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + } + } else { + if len(c.BearerToken) > 0 { + c.Authorization = &Authorization{Credentials: c.BearerToken} + c.Authorization.Type = "Bearer" + c.BearerToken = "" + } + if len(c.BearerTokenFile) > 0 { + c.Authorization = &Authorization{CredentialsFile: c.BearerTokenFile} + c.Authorization.Type = "Bearer" + c.BearerTokenFile = "" + } + } + if c.OAuth2 != nil { + if c.BasicAuth != nil { + return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + } + if len(c.OAuth2.ClientID) == 0 { + return fmt.Errorf("oauth2 client_id must be configured") + } + if len(c.OAuth2.ClientSecret) == 0 && len(c.OAuth2.ClientSecretFile) == 0 { + return fmt.Errorf("either oauth2 client_secret or client_secret_file must be configured") + } + if len(c.OAuth2.TokenURL) == 0 { + return fmt.Errorf("oauth2 token_url must be configured") + } + if len(c.OAuth2.ClientSecret) > 0 && len(c.OAuth2.ClientSecretFile) > 0 { + return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured") + } + } + return nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *HTTPClientConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain HTTPClientConfig + *c = DefaultHTTPClientConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return c.Validate() +} + +// UnmarshalJSON implements the json.Unmarshaler interface.
+func (c *HTTPClientConfig) UnmarshalJSON(data []byte) error { + type plain HTTPClientConfig + *c = DefaultHTTPClientConfig + if err := json.Unmarshal(data, (*plain)(c)); err != nil { + return err + } + return c.Validate() +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (a *BasicAuth) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain BasicAuth + return unmarshal((*plain)(a)) +} + +// DialContextFunc defines the signature of the DialContext() function implemented +// by net.Dialer. +type DialContextFunc func(context.Context, string, string) (net.Conn, error) + +type httpClientOptions struct { + dialContextFunc DialContextFunc + keepAlivesEnabled bool + http2Enabled bool + idleConnTimeout time.Duration +} + +// HTTPClientOption defines an option that can be applied to the HTTP client. +type HTTPClientOption func(options *httpClientOptions) + +// WithDialContextFunc allows you to override the func that gets used for the actual dialing. The default is `net.Dialer.DialContext`. +func WithDialContextFunc(fn DialContextFunc) HTTPClientOption { + return func(opts *httpClientOptions) { + opts.dialContextFunc = fn + } +} + +// WithKeepAlivesDisabled allows disabling HTTP keep-alive. +func WithKeepAlivesDisabled() HTTPClientOption { + return func(opts *httpClientOptions) { + opts.keepAlivesEnabled = false + } +} + +// WithHTTP2Disabled allows disabling HTTP2. +func WithHTTP2Disabled() HTTPClientOption { + return func(opts *httpClientOptions) { + opts.http2Enabled = false + } +} + +// WithIdleConnTimeout allows setting the idle connection timeout. +func WithIdleConnTimeout(timeout time.Duration) HTTPClientOption { + return func(opts *httpClientOptions) { + opts.idleConnTimeout = timeout + } +} + +// newClient returns an http.Client using the specified http.RoundTripper. +func newClient(rt http.RoundTripper) *http.Client { + return &http.Client{Transport: rt} +} + +// NewClientFromConfig returns a new HTTP client configured for the +// given config.HTTPClientConfig and config.HTTPClientOption. +// The name is used as go-conntrack metric label. +func NewClientFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (*http.Client, error) { + rt, err := NewRoundTripperFromConfig(cfg, name, optFuncs...) + if err != nil { + return nil, err + } + client := newClient(rt) + if !cfg.FollowRedirects { + client.CheckRedirect = func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + } + } + return client, nil +} + +// NewRoundTripperFromConfig returns a new HTTP RoundTripper configured for the +// given config.HTTPClientConfig and config.HTTPClientOption. +// The name is used as go-conntrack metric label. +func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HTTPClientOption) (http.RoundTripper, error) { + opts := defaultHTTPClientOptions + for _, f := range optFuncs { + f(&opts) + } + + var dialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + if opts.dialContextFunc != nil { + dialContext = conntrack.NewDialContextFunc( + conntrack.DialWithDialContextFunc((func(context.Context, string, string) (net.Conn, error))(opts.dialContextFunc)), + conntrack.DialWithTracing(), + conntrack.DialWithName(name)) + } else { + dialContext = conntrack.NewDialContextFunc( + conntrack.DialWithTracing(), + conntrack.DialWithName(name)) + } + + newRT := func(tlsConfig *tls.Config) (http.RoundTripper, error) { + // The only timeout we care about is the configured scrape timeout.
+ // It is applied on request. So we leave out any timings here. + var rt http.RoundTripper = &http.Transport{ + Proxy: http.ProxyURL(cfg.ProxyURL.URL), + MaxIdleConns: 20000, + MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 + DisableKeepAlives: !opts.keepAlivesEnabled, + TLSClientConfig: tlsConfig, + DisableCompression: true, + IdleConnTimeout: opts.idleConnTimeout, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DialContext: dialContext, + } + if opts.http2Enabled && os.Getenv("PROMETHEUS_COMMON_DISABLE_HTTP2") == "" { + // HTTP/2 support in golang had many problematic corner cases where + // dead connections would be kept and used in connection pools. + // https://github.com/golang/go/issues/32388 + // https://github.com/golang/go/issues/39337 + // https://github.com/golang/go/issues/39750 + + // Do not enable HTTP2 if the environment variable + // PROMETHEUS_COMMON_DISABLE_HTTP2 is set to a non-empty value. + // This allows users to easily disable HTTP2 in case they run into + // issues again, but will be removed once we are confident that + // things work as expected. + + http2t, err := http2.ConfigureTransports(rt.(*http.Transport)) + if err != nil { + return nil, err + } + http2t.ReadIdleTimeout = time.Minute + } + + // If authorization_credentials is provided, create a round tripper that will set the + // Authorization header correctly on each request. + if cfg.Authorization != nil && len(cfg.Authorization.Credentials) > 0 { + rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt) + } else if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 { + rt = NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt) + } + // Backwards compatibility, be nice with importers who would not have + // called Validate(). + if len(cfg.BearerToken) > 0 { + rt = NewAuthorizationCredentialsRoundTripper("Bearer", cfg.BearerToken, rt) + } else if len(cfg.BearerTokenFile) > 0 { + rt = NewAuthorizationCredentialsFileRoundTripper("Bearer", cfg.BearerTokenFile, rt) + } + + if cfg.BasicAuth != nil { + rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, rt) + } + + if cfg.OAuth2 != nil { + rt = NewOAuth2RoundTripper(cfg.OAuth2, rt) + } + // Return a new configured RoundTripper. + return rt, nil + } + + tlsConfig, err := NewTLSConfig(&cfg.TLSConfig) + if err != nil { + return nil, err + } + + if len(cfg.TLSConfig.CAFile) == 0 { + // No need for a RoundTripper that reloads the CA file automatically. + return newRT(tlsConfig) + } + + return NewTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) +} + +type authorizationCredentialsRoundTripper struct { + authType string + authCredentials Secret + rt http.RoundTripper +} + +// NewAuthorizationCredentialsRoundTripper adds the provided credentials to a +// request unless the authorization header has already been set.
+func NewAuthorizationCredentialsRoundTripper(authType string, authCredentials Secret, rt http.RoundTripper) http.RoundTripper { + return &authorizationCredentialsRoundTripper{authType, authCredentials, rt} +} + +func (rt *authorizationCredentialsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) == 0 { + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, string(rt.authCredentials))) + } + return rt.rt.RoundTrip(req) +} + +func (rt *authorizationCredentialsRoundTripper) CloseIdleConnections() { + if ci, ok := rt.rt.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + +type authorizationCredentialsFileRoundTripper struct { + authType string + authCredentialsFile string + rt http.RoundTripper +} + +// NewAuthorizationCredentialsFileRoundTripper adds the authorization +// credentials read from the provided file to a request unless the authorization +// header has already been set. This file is read for every request. +func NewAuthorizationCredentialsFileRoundTripper(authType, authCredentialsFile string, rt http.RoundTripper) http.RoundTripper { + return &authorizationCredentialsFileRoundTripper{authType, authCredentialsFile, rt} +} + +func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) == 0 { + b, err := ioutil.ReadFile(rt.authCredentialsFile) + if err != nil { + return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err) + } + authCredentials := strings.TrimSpace(string(b)) + + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("%s %s", rt.authType, authCredentials)) + } + + return rt.rt.RoundTrip(req) +} + +func (rt *authorizationCredentialsFileRoundTripper) CloseIdleConnections() { + if ci, ok := rt.rt.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + +type basicAuthRoundTripper struct { + username string + password Secret + passwordFile string + rt http.RoundTripper +} + +// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has +// already been set. 
+func NewBasicAuthRoundTripper(username string, password Secret, passwordFile string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, passwordFile, rt} +} + +func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + if rt.passwordFile != "" { + bs, err := ioutil.ReadFile(rt.passwordFile) + if err != nil { + return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err) + } + req.SetBasicAuth(rt.username, strings.TrimSpace(string(bs))) + } else { + req.SetBasicAuth(rt.username, strings.TrimSpace(string(rt.password))) + } + return rt.rt.RoundTrip(req) +} + +func (rt *basicAuthRoundTripper) CloseIdleConnections() { + if ci, ok := rt.rt.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + +type oauth2RoundTripper struct { + config *OAuth2 + rt http.RoundTripper + next http.RoundTripper + secret string + mtx sync.RWMutex +} + +func NewOAuth2RoundTripper(config *OAuth2, next http.RoundTripper) http.RoundTripper { + return &oauth2RoundTripper{ + config: config, + next: next, + } +} + +func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + var ( + secret string + changed bool + ) + + if rt.config.ClientSecretFile != "" { + data, err := ioutil.ReadFile(rt.config.ClientSecretFile) + if err != nil { + return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err) + } + secret = strings.TrimSpace(string(data)) + rt.mtx.RLock() + changed = secret != rt.secret + rt.mtx.RUnlock() + } + + if changed || rt.rt == nil { + if rt.config.ClientSecret != "" { + secret = string(rt.config.ClientSecret) + } + + config := &clientcredentials.Config{ + ClientID: rt.config.ClientID, + ClientSecret: secret, + Scopes: rt.config.Scopes, + TokenURL: rt.config.TokenURL, + EndpointParams: mapToValues(rt.config.EndpointParams), + } + + tlsConfig, err := NewTLSConfig(&rt.config.TLSConfig) + if err != nil { + return nil, err + } + + var t http.RoundTripper + if len(rt.config.TLSConfig.CAFile) == 0 { + t = &http.Transport{TLSClientConfig: tlsConfig} + } else { + t, err = NewTLSRoundTripper(tlsConfig, rt.config.TLSConfig.CAFile, func(tls *tls.Config) (http.RoundTripper, error) { + return &http.Transport{TLSClientConfig: tls}, nil + }) + if err != nil { + return nil, err + } + } + + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{Transport: t}) + tokenSource := config.TokenSource(ctx) + + rt.mtx.Lock() + rt.secret = secret + rt.rt = &oauth2.Transport{ + Base: rt.next, + Source: tokenSource, + } + rt.mtx.Unlock() + } + + rt.mtx.RLock() + currentRT := rt.rt + rt.mtx.RUnlock() + return currentRT.RoundTrip(req) +} + +func (rt *oauth2RoundTripper) CloseIdleConnections() { + // OAuth2 RT does not support CloseIdleConnections() but the next RT might. + if ci, ok := rt.next.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + +func mapToValues(m map[string]string) url.Values { + v := url.Values{} + for name, value := range m { + v.Set(name, value) + } + + return v +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // Shallow copy of the struct. + r2 := new(http.Request) + *r2 = *r + // Deep copy of the Header. 
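+ // Note: only the Header map itself is duplicated; the []string values are shared with the original request.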
+ r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +// NewTLSConfig creates a new tls.Config from the given TLSConfig. +func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { + tlsConfig := &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify} + + // If a CA cert is provided then let's read it in so we can validate the + // scrape target's certificate properly. + if len(cfg.CAFile) > 0 { + b, err := readCAFile(cfg.CAFile) + if err != nil { + return nil, err + } + if !updateRootCA(tlsConfig, b) { + return nil, fmt.Errorf("unable to use specified CA cert %s", cfg.CAFile) + } + } + + if len(cfg.ServerName) > 0 { + tlsConfig.ServerName = cfg.ServerName + } + // If a client cert & key is provided then configure TLS config accordingly. + if len(cfg.CertFile) > 0 && len(cfg.KeyFile) == 0 { + return nil, fmt.Errorf("client cert file %q specified without client key file", cfg.CertFile) + } else if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 { + return nil, fmt.Errorf("client key file %q specified without client cert file", cfg.KeyFile) + } else if len(cfg.CertFile) > 0 && len(cfg.KeyFile) > 0 { + // Verify that client cert and key are valid. + if _, err := cfg.getClientCertificate(nil); err != nil { + return nil, err + } + tlsConfig.GetClientCertificate = cfg.getClientCertificate + } + + return tlsConfig, nil +} + +// TLSConfig configures the options for TLS connections. +type TLSConfig struct { + // The CA cert to use for the targets. + CAFile string `yaml:"ca_file,omitempty" json:"ca_file,omitempty"` + // The client cert file for the targets. + CertFile string `yaml:"cert_file,omitempty" json:"cert_file,omitempty"` + // The client key file for the targets. + KeyFile string `yaml:"key_file,omitempty" json:"key_file,omitempty"` + // Used to verify the hostname for the targets. + ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"` + // Disable target certificate validation. + InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *TLSConfig) SetDirectory(dir string) { + if c == nil { + return + } + c.CAFile = JoinDir(dir, c.CAFile) + c.CertFile = JoinDir(dir, c.CertFile) + c.KeyFile = JoinDir(dir, c.KeyFile) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain TLSConfig + return unmarshal((*plain)(c)) +} + +// getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. +func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + } + return &cert, nil +} + +// readCAFile reads the CA cert file from disk. +func readCAFile(f string) ([]byte, error) { + data, err := ioutil.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) + } + return data, nil +} + +// updateRootCA parses the given byte slice as a series of PEM encoded certificates and updates tls.Config.RootCAs. 
+func updateRootCA(cfg *tls.Config, b []byte) bool { + caCertPool := x509.NewCertPool() + if !caCertPool.AppendCertsFromPEM(b) { + return false + } + cfg.RootCAs = caCertPool + return true +} + +// tlsRoundTripper is a RoundTripper that automatically updates its TLS +// configuration whenever the content of the CA file changes. +type tlsRoundTripper struct { + caFile string + // newRT returns a new RoundTripper. + newRT func(*tls.Config) (http.RoundTripper, error) + + mtx sync.RWMutex + rt http.RoundTripper + hashCAFile []byte + tlsConfig *tls.Config +} + +func NewTLSRoundTripper( + cfg *tls.Config, + caFile string, + newRT func(*tls.Config) (http.RoundTripper, error), +) (http.RoundTripper, error) { + t := &tlsRoundTripper{ + caFile: caFile, + newRT: newRT, + tlsConfig: cfg, + } + + rt, err := t.newRT(t.tlsConfig) + if err != nil { + return nil, err + } + t.rt = rt + _, t.hashCAFile, err = t.getCAWithHash() + if err != nil { + return nil, err + } + + return t, nil +} + +func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) { + b, err := readCAFile(t.caFile) + if err != nil { + return nil, nil, err + } + h := sha256.Sum256(b) + return b, h[:], nil +} + +// RoundTrip implements the http.RoundTripper interface. +func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + b, h, err := t.getCAWithHash() + if err != nil { + return nil, err + } + + t.mtx.RLock() + equal := bytes.Equal(h[:], t.hashCAFile) + rt := t.rt + t.mtx.RUnlock() + if equal { + // The CA cert hasn't changed, use the existing RoundTripper. + return rt.RoundTrip(req) + } + + // Create a new RoundTripper. + tlsConfig := t.tlsConfig.Clone() + if !updateRootCA(tlsConfig, b) { + return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) + } + rt, err = t.newRT(tlsConfig) + if err != nil { + return nil, err + } + t.CloseIdleConnections() + + t.mtx.Lock() + t.rt = rt + t.hashCAFile = h[:] + t.mtx.Unlock() + + return rt.RoundTrip(req) +} + +func (t *tlsRoundTripper) CloseIdleConnections() { + t.mtx.RLock() + defer t.mtx.RUnlock() + if ci, ok := t.rt.(closeIdler); ok { + ci.CloseIdleConnections() + } +} + +func (c HTTPClientConfig) String() string { + b, err := yaml.Marshal(c) + if err != nil { + return fmt.Sprintf("<error creating http client config string: %s>", err) + } + return string(b) +} diff --git a/vendor/github.com/prometheus/common/sigv4/LICENSE b/vendor/github.com/prometheus/common/sigv4/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/prometheus/common/sigv4/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity.
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/common/sigv4/Makefile b/vendor/github.com/prometheus/common/sigv4/Makefile
new file mode 100644
index 00000000000..e7be5dd9aa0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/Makefile
@@ -0,0 +1,22 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include ../Makefile.common
+
+.PHONY: test
+test:: deps check_license unused common-test
+	@echo ">> Running sigv4 tests"
+
+ifeq (,$(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(7|8|9|10)\.'))
+test:: lint
+endif
diff --git a/vendor/github.com/prometheus/common/sigv4/sigv4.go b/vendor/github.com/prometheus/common/sigv4/sigv4.go
new file mode 100644
index 00000000000..dd140bb6896
--- /dev/null
+++ b/vendor/github.com/prometheus/common/sigv4/sigv4.go
@@ -0,0 +1,137 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sigv4
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/textproto"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/session"
+	signer "github.com/aws/aws-sdk-go/aws/signer/v4"
+)
+
+var sigv4HeaderDenylist = []string{
+	"uber-trace-id",
+}
+
+type sigV4RoundTripper struct {
+	region string
+	next   http.RoundTripper
+	pool   sync.Pool
+
+	signer *signer.Signer
+}
+
+// NewSigV4RoundTripper returns a new http.RoundTripper that will sign requests
+// using Amazon's Signature Verification V4 signing procedure. The request will
+// then be handed off to the next RoundTripper provided by next. If next is nil,
+// http.DefaultTransport will be used.
+//
+// Credentials for signing are retrieved using the default AWS credential
+// chain. If credentials cannot be found, an error will be returned.
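+//
+// A minimal usage sketch (the region value here is illustrative; "aps" is
+// the service name this RoundTripper signs for):
+//
+//	rt, err := NewSigV4RoundTripper(&SigV4Config{Region: "us-east-2"}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	client := &http.Client{Transport: rt}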
+func NewSigV4RoundTripper(cfg *SigV4Config, next http.RoundTripper) (http.RoundTripper, error) { + if next == nil { + next = http.DefaultTransport + } + + creds := credentials.NewStaticCredentials(cfg.AccessKey, string(cfg.SecretKey), "") + if cfg.AccessKey == "" && cfg.SecretKey == "" { + creds = nil + } + + sess, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{ + Region: aws.String(cfg.Region), + Credentials: creds, + }, + Profile: cfg.Profile, + }) + if err != nil { + return nil, fmt.Errorf("could not create new AWS session: %w", err) + } + if _, err := sess.Config.Credentials.Get(); err != nil { + return nil, fmt.Errorf("could not get SigV4 credentials: %w", err) + } + if aws.StringValue(sess.Config.Region) == "" { + return nil, fmt.Errorf("region not configured in sigv4 or in default credentials chain") + } + + signerCreds := sess.Config.Credentials + if cfg.RoleARN != "" { + signerCreds = stscreds.NewCredentials(sess, cfg.RoleARN) + } + + rt := &sigV4RoundTripper{ + region: cfg.Region, + next: next, + signer: signer.NewSigner(signerCreds), + } + rt.pool.New = rt.newBuf + return rt, nil +} + +func (rt *sigV4RoundTripper) newBuf() interface{} { + return bytes.NewBuffer(make([]byte, 0, 1024)) +} + +func (rt *sigV4RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // rt.signer.Sign needs a seekable body, so we replace the body with a + // buffered reader filled with the contents of original body. + buf := rt.pool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + rt.pool.Put(buf) + }() + if _, err := io.Copy(buf, req.Body); err != nil { + return nil, err + } + // Close the original body since we don't need it anymore. + _ = req.Body.Close() + + // Ensure our seeker is back at the start of the buffer once we return. + var seeker io.ReadSeeker = bytes.NewReader(buf.Bytes()) + defer func() { + _, _ = seeker.Seek(0, io.SeekStart) + }() + req.Body = ioutil.NopCloser(seeker) + + // Clone the request and trim out headers that we don't want to sign. + signReq := req.Clone(req.Context()) + for _, header := range sigv4HeaderDenylist { + signReq.Header.Del(header) + } + + headers, err := rt.signer.Sign(signReq, seeker, "aps", rt.region, time.Now().UTC()) + if err != nil { + return nil, fmt.Errorf("failed to sign request: %w", err) + } + + // Copy over signed headers. Authorization header is not returned by + // rt.signer.Sign and needs to be copied separately. + for k, v := range headers { + req.Header[textproto.CanonicalMIMEHeaderKey(k)] = v + } + req.Header.Set("Authorization", signReq.Header.Get("Authorization")) + + return rt.next.RoundTrip(req) +} diff --git a/vendor/github.com/prometheus/common/sigv4/sigv4_config.go b/vendor/github.com/prometheus/common/sigv4/sigv4_config.go new file mode 100644 index 00000000000..776fe764ae0 --- /dev/null +++ b/vendor/github.com/prometheus/common/sigv4/sigv4_config.go @@ -0,0 +1,47 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package sigv4
+
+import (
+	"fmt"
+
+	"github.com/prometheus/common/config"
+)
+
+// SigV4Config is the configuration for signing remote write requests with
+// AWS's SigV4 verification process. Empty values will be retrieved using the
+// AWS default credentials chain.
+type SigV4Config struct {
+	Region    string        `yaml:"region,omitempty"`
+	AccessKey string        `yaml:"access_key,omitempty"`
+	SecretKey config.Secret `yaml:"secret_key,omitempty"`
+	Profile   string        `yaml:"profile,omitempty"`
+	RoleARN   string        `yaml:"role_arn,omitempty"`
+}
+
+func (c *SigV4Config) Validate() error {
+	if (c.AccessKey == "") != (c.SecretKey == "") {
+		return fmt.Errorf("must provide an AWS SigV4 Access key and Secret Key if credentials are specified in the SigV4 config")
+	}
+	return nil
+}
+
+func (c *SigV4Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type plain SigV4Config
+	*c = SigV4Config{}
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+	return c.Validate()
+}
diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go
new file mode 100644
index 00000000000..24441d2bcbd
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/config/config.go
@@ -0,0 +1,820 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/alecthomas/units"
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/sigv4"
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
+)
+
+var (
+	patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`)
+	reservedHeaders = map[string]struct{}{
+		// NOTE: authorization is checked specially,
+		// see RemoteWriteConfig.UnmarshalYAML.
+		// "authorization": {},
+		"host":                              {},
+		"content-encoding":                  {},
+		"content-length":                    {},
+		"content-type":                      {},
+		"user-agent":                        {},
+		"connection":                        {},
+		"keep-alive":                        {},
+		"proxy-authenticate":                {},
+		"proxy-authorization":               {},
+		"www-authenticate":                  {},
+		"accept-encoding":                   {},
+		"x-prometheus-remote-write-version": {},
+		"x-prometheus-remote-read-version":  {},
+
+		// Added by SigV4.
+		"x-amz-date":           {},
+		"x-amz-security-token": {},
+		"x-amz-content-sha256": {},
+	}
+)
+
+// Load parses the YAML input s into a Config.
+func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+	cfg := &Config{}
+	// If the entire config body is empty the UnmarshalYAML method is
+	// never called. We thus have to set the DefaultConfig at the entry
+	// point as well.
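+	// For example, loading an empty string this way still yields a copy of
+	// DefaultConfig rather than a zero-valued Config.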
+ *cfg = DefaultConfig + + err := yaml.UnmarshalStrict([]byte(s), cfg) + if err != nil { + return nil, err + } + + if !expandExternalLabels { + return cfg, nil + } + + for i, v := range cfg.GlobalConfig.ExternalLabels { + newV := os.Expand(v.Value, func(s string) string { + if v := os.Getenv(s); v != "" { + return v + } + level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + return "" + }) + if newV != v.Value { + level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + v.Value = newV + cfg.GlobalConfig.ExternalLabels[i] = v + } + } + return cfg, nil +} + +// LoadFile parses the given YAML file into a Config. +func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + cfg, err := Load(string(content), expandExternalLabels, logger) + if err != nil { + return nil, errors.Wrapf(err, "parsing YAML file %s", filename) + } + + if agentMode { + if len(cfg.RemoteWriteConfigs) == 0 { + return nil, errors.New("at least one remote_write target must be specified in agent mode") + } + + if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 { + return nil, errors.New("field alerting is not allowed in agent mode") + } + + if len(cfg.RuleFiles) > 0 { + return nil, errors.New("field rule_files is not allowed in agent mode") + } + + if len(cfg.RemoteReadConfigs) > 0 { + return nil, errors.New("field remote_read is not allowed in agent mode") + } + } + + cfg.SetDirectory(filepath.Dir(filename)) + return cfg, nil +} + +// The defaults applied before parsing the respective config sections. +var ( + // DefaultConfig is the default top-level configuration. + DefaultConfig = Config{ + GlobalConfig: DefaultGlobalConfig, + } + + // DefaultGlobalConfig is the default global configuration. + DefaultGlobalConfig = GlobalConfig{ + ScrapeInterval: model.Duration(1 * time.Minute), + ScrapeTimeout: model.Duration(10 * time.Second), + EvaluationInterval: model.Duration(1 * time.Minute), + } + + // DefaultScrapeConfig is the default scrape configuration. + DefaultScrapeConfig = ScrapeConfig{ + // ScrapeTimeout and ScrapeInterval default to the + // configured globals. + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + } + + // DefaultAlertmanagerConfig is the default alertmanager configuration. + DefaultAlertmanagerConfig = AlertmanagerConfig{ + Scheme: "http", + Timeout: model.Duration(10 * time.Second), + APIVersion: AlertmanagerAPIVersionV2, + HTTPClientConfig: config.DefaultHTTPClientConfig, + } + + // DefaultRemoteWriteConfig is the default remote write configuration. + DefaultRemoteWriteConfig = RemoteWriteConfig{ + RemoteTimeout: model.Duration(30 * time.Second), + QueueConfig: DefaultQueueConfig, + MetadataConfig: DefaultMetadataConfig, + HTTPClientConfig: config.DefaultHTTPClientConfig, + } + + // DefaultQueueConfig is the default remote queue configuration. + DefaultQueueConfig = QueueConfig{ + // With a maximum of 200 shards, assuming an average of 100ms remote write + // time and 500 samples per batch, we will be able to push 1M samples/s. + MaxShards: 200, + MinShards: 1, + MaxSamplesPerSend: 500, + + // Each shard will have a max of 2500 samples pending in its channel, plus the pending + // samples that have been enqueued. 
Theoretically we should only ever have about 3000 samples
+		// per shard pending. At 200 shards that's 600k.
+		Capacity:          2500,
+		BatchSendDeadline: model.Duration(5 * time.Second),
+
+		// Backoff times for retrying a batch of samples on recoverable errors.
+		MinBackoff: model.Duration(30 * time.Millisecond),
+		MaxBackoff: model.Duration(5 * time.Second),
+	}
+
+	// DefaultMetadataConfig is the default metadata configuration for a remote write endpoint.
+	DefaultMetadataConfig = MetadataConfig{
+		Send:              true,
+		SendInterval:      model.Duration(1 * time.Minute),
+		MaxSamplesPerSend: 500,
+	}
+
+	// DefaultRemoteReadConfig is the default remote read configuration.
+	DefaultRemoteReadConfig = RemoteReadConfig{
+		RemoteTimeout:    model.Duration(1 * time.Minute),
+		HTTPClientConfig: config.DefaultHTTPClientConfig,
+	}
+
+	// DefaultStorageConfig is the default TSDB/Exemplar storage configuration.
+	DefaultStorageConfig = StorageConfig{
+		ExemplarsConfig: &DefaultExemplarsConfig,
+	}
+
+	DefaultExemplarsConfig = ExemplarsConfig{
+		MaxExemplars: 100000,
+	}
+)
+
+// Config is the top-level configuration for Prometheus's config files.
+type Config struct {
+	GlobalConfig   GlobalConfig    `yaml:"global"`
+	AlertingConfig AlertingConfig  `yaml:"alerting,omitempty"`
+	RuleFiles      []string        `yaml:"rule_files,omitempty"`
+	ScrapeConfigs  []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
+	StorageConfig  StorageConfig   `yaml:"storage,omitempty"`
+
+	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
+	RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *Config) SetDirectory(dir string) {
+	c.GlobalConfig.SetDirectory(dir)
+	c.AlertingConfig.SetDirectory(dir)
+	for i, file := range c.RuleFiles {
+		c.RuleFiles[i] = config.JoinDir(dir, file)
+	}
+	for _, c := range c.ScrapeConfigs {
+		c.SetDirectory(dir)
+	}
+	for _, c := range c.RemoteWriteConfigs {
+		c.SetDirectory(dir)
+	}
+	for _, c := range c.RemoteReadConfigs {
+		c.SetDirectory(dir)
+	}
+}
+
+func (c Config) String() string {
+	b, err := yaml.Marshal(c)
+	if err != nil {
+		return fmt.Sprintf("<error creating config string: %s>", err)
+	}
+	return string(b)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultConfig
+	// We want to set c to the defaults and then overwrite it with the input.
+	// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
+	// again, we have to hide it using a type indirection.
+	type plain Config
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+
+	// If a global block was open but empty the default global config is overwritten.
+	// We have to restore it here.
+	if c.GlobalConfig.isZero() {
+		c.GlobalConfig = DefaultGlobalConfig
+	}
+
+	for _, rf := range c.RuleFiles {
+		if !patRulePath.MatchString(rf) {
+			return errors.Errorf("invalid rule file path %q", rf)
+		}
+	}
+	// Do global overrides and validate unique names.
+	jobNames := map[string]struct{}{}
+	for _, scfg := range c.ScrapeConfigs {
+		if scfg == nil {
+			return errors.New("empty or null scrape config section")
+		}
+		// First set the correct scrape interval, then check that the timeout
+		// (inferred or explicit) is not greater than that.
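+		// Example: with the default 10s global scrape timeout, a job that
+		// sets only scrape_interval: 5s has its timeout lowered to 5s below,
+		// so the timeout never exceeds the interval.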
+ if scfg.ScrapeInterval == 0 { + scfg.ScrapeInterval = c.GlobalConfig.ScrapeInterval + } + if scfg.ScrapeTimeout > scfg.ScrapeInterval { + return errors.Errorf("scrape timeout greater than scrape interval for scrape config with job name %q", scfg.JobName) + } + if scfg.ScrapeTimeout == 0 { + if c.GlobalConfig.ScrapeTimeout > scfg.ScrapeInterval { + scfg.ScrapeTimeout = scfg.ScrapeInterval + } else { + scfg.ScrapeTimeout = c.GlobalConfig.ScrapeTimeout + } + } + + if _, ok := jobNames[scfg.JobName]; ok { + return errors.Errorf("found multiple scrape configs with job name %q", scfg.JobName) + } + jobNames[scfg.JobName] = struct{}{} + } + rwNames := map[string]struct{}{} + for _, rwcfg := range c.RemoteWriteConfigs { + if rwcfg == nil { + return errors.New("empty or null remote write config section") + } + // Skip empty names, we fill their name with their config hash in remote write code. + if _, ok := rwNames[rwcfg.Name]; ok && rwcfg.Name != "" { + return errors.Errorf("found multiple remote write configs with job name %q", rwcfg.Name) + } + rwNames[rwcfg.Name] = struct{}{} + } + rrNames := map[string]struct{}{} + for _, rrcfg := range c.RemoteReadConfigs { + if rrcfg == nil { + return errors.New("empty or null remote read config section") + } + // Skip empty names, we fill their name with their config hash in remote read code. + if _, ok := rrNames[rrcfg.Name]; ok && rrcfg.Name != "" { + return errors.Errorf("found multiple remote read configs with job name %q", rrcfg.Name) + } + rrNames[rrcfg.Name] = struct{}{} + } + return nil +} + +// GlobalConfig configures values that are used across other configuration +// objects. +type GlobalConfig struct { + // How frequently to scrape targets by default. + ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` + // The default timeout when scraping targets. + ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // How frequently to evaluate rules by default. + EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` + // File to which PromQL queries are logged. + QueryLogFile string `yaml:"query_log_file,omitempty"` + // The labels to add to any timeseries that this Prometheus instance scrapes. + ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *GlobalConfig) SetDirectory(dir string) { + c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Create a clean global config as the previous one was already populated + // by the default due to the YAML parser behavior for empty blocks. + gc := &GlobalConfig{} + type plain GlobalConfig + if err := unmarshal((*plain)(gc)); err != nil { + return err + } + + for _, l := range gc.ExternalLabels { + if !model.LabelName(l.Name).IsValid() { + return errors.Errorf("%q is not a valid label name", l.Name) + } + if !model.LabelValue(l.Value).IsValid() { + return errors.Errorf("%q is not a valid label value", l.Value) + } + } + + // First set the correct scrape interval, then check that the timeout + // (inferred or explicit) is not greater than that. 
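+	// Example: a global section that sets scrape_interval: 5s but no
+	// scrape_timeout gets a 5s timeout here, because the 10s default
+	// timeout would exceed the 5s interval.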
+ if gc.ScrapeInterval == 0 { + gc.ScrapeInterval = DefaultGlobalConfig.ScrapeInterval + } + if gc.ScrapeTimeout > gc.ScrapeInterval { + return errors.New("global scrape timeout greater than scrape interval") + } + if gc.ScrapeTimeout == 0 { + if DefaultGlobalConfig.ScrapeTimeout > gc.ScrapeInterval { + gc.ScrapeTimeout = gc.ScrapeInterval + } else { + gc.ScrapeTimeout = DefaultGlobalConfig.ScrapeTimeout + } + } + if gc.EvaluationInterval == 0 { + gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval + } + *c = *gc + return nil +} + +// isZero returns true iff the global config is the zero value. +func (c *GlobalConfig) isZero() bool { + return c.ExternalLabels == nil && + c.ScrapeInterval == 0 && + c.ScrapeTimeout == 0 && + c.EvaluationInterval == 0 && + c.QueryLogFile == "" +} + +// ScrapeConfig configures a scraping unit for Prometheus. +type ScrapeConfig struct { + // The job name to which the job label is set by default. + JobName string `yaml:"job_name"` + // Indicator whether the scraped metrics should remain unmodified. + HonorLabels bool `yaml:"honor_labels,omitempty"` + // Indicator whether the scraped timestamps should be respected. + HonorTimestamps bool `yaml:"honor_timestamps"` + // A set of query parameters with which the target is scraped. + Params url.Values `yaml:"params,omitempty"` + // How frequently to scrape the targets of this scrape config. + ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` + // The timeout for scraping targets of this config. + ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // The HTTP resource path on which to fetch metrics from targets. + MetricsPath string `yaml:"metrics_path,omitempty"` + // The URL scheme with which to fetch metrics from targets. + Scheme string `yaml:"scheme,omitempty"` + // An uncompressed response body larger than this many bytes will cause the + // scrape to fail. 0 means no limit. + BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` + // More than this many samples post metric-relabeling will cause the scrape to + // fail. + SampleLimit uint `yaml:"sample_limit,omitempty"` + // More than this many targets after the target relabeling will cause the + // scrapes to fail. + TargetLimit uint `yaml:"target_limit,omitempty"` + // More than this many labels post metric-relabeling will cause the scrape to + // fail. + LabelLimit uint `yaml:"label_limit,omitempty"` + // More than this label name length post metric-relabeling will cause the + // scrape to fail. + LabelNameLengthLimit uint `yaml:"label_name_length_limit,omitempty"` + // More than this label value length post metric-relabeling will cause the + // scrape to fail. + LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` + + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + + ServiceDiscoveryConfigs discovery.Configs `yaml:"-"` + HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` + + // List of target relabel configurations. + RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` + // List of metric relabel configurations. + MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *ScrapeConfig) SetDirectory(dir string) { + c.ServiceDiscoveryConfigs.SetDirectory(dir) + c.HTTPClientConfig.SetDirectory(dir) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultScrapeConfig + if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil { + return err + } + if len(c.JobName) == 0 { + return errors.New("job_name is empty") + } + + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. + // Thus we just do its validation here. + if err := c.HTTPClientConfig.Validate(); err != nil { + return err + } + + // Check for users putting URLs in target groups. + if len(c.RelabelConfigs) == 0 { + if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil { + return err + } + } + + for _, rlcfg := range c.RelabelConfigs { + if rlcfg == nil { + return errors.New("empty or null target relabeling rule in scrape config") + } + } + for _, rlcfg := range c.MetricRelabelConfigs { + if rlcfg == nil { + return errors.New("empty or null metric relabeling rule in scrape config") + } + } + + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { + return discovery.MarshalYAMLWithInlineConfigs(c) +} + +// StorageConfig configures runtime reloadable configuration options. +type StorageConfig struct { + ExemplarsConfig *ExemplarsConfig `yaml:"exemplars,omitempty"` +} + +// ExemplarsConfig configures runtime reloadable configuration options. +type ExemplarsConfig struct { + // MaxExemplars sets the size, in # of exemplars stored, of the single circular buffer used to store exemplars in memory. + // Use a value of 0 or less than 0 to disable the storage without having to restart Prometheus. + MaxExemplars int64 `yaml:"max_exemplars,omitempty"` +} + +// AlertingConfig configures alerting and alertmanager related configs. +type AlertingConfig struct { + AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"` + AlertmanagerConfigs AlertmanagerConfigs `yaml:"alertmanagers,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *AlertingConfig) SetDirectory(dir string) { + for _, c := range c.AlertmanagerConfigs { + c.SetDirectory(dir) + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *AlertingConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Create a clean global config as the previous one was already populated + // by the default due to the YAML parser behavior for empty blocks. + *c = AlertingConfig{} + type plain AlertingConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + for _, rlcfg := range c.AlertRelabelConfigs { + if rlcfg == nil { + return errors.New("empty or null alert relabeling rule") + } + } + return nil +} + +// AlertmanagerConfigs is a slice of *AlertmanagerConfig. +type AlertmanagerConfigs []*AlertmanagerConfig + +// ToMap converts a slice of *AlertmanagerConfig to a map. +func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig { + ret := make(map[string]*AlertmanagerConfig) + for i := range a { + ret[fmt.Sprintf("config-%d", i)] = a[i] + } + return ret +} + +// AlertmanagerAPIVersion represents a version of the +// github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'. +type AlertmanagerAPIVersion string + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (v *AlertmanagerAPIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { + *v = AlertmanagerAPIVersion("") + type plain AlertmanagerAPIVersion + if err := unmarshal((*plain)(v)); err != nil { + return err + } + + for _, supportedVersion := range SupportedAlertmanagerAPIVersions { + if *v == supportedVersion { + return nil + } + } + + return fmt.Errorf("expected Alertmanager api version to be one of %v but got %v", SupportedAlertmanagerAPIVersions, *v) +} + +const ( + // AlertmanagerAPIVersionV1 represents + // github.com/prometheus/alertmanager/api/v1. + AlertmanagerAPIVersionV1 AlertmanagerAPIVersion = "v1" + // AlertmanagerAPIVersionV2 represents + // github.com/prometheus/alertmanager/api/v2. + AlertmanagerAPIVersionV2 AlertmanagerAPIVersion = "v2" +) + +var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{ + AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2, +} + +// AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. +type AlertmanagerConfig struct { + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + + ServiceDiscoveryConfigs discovery.Configs `yaml:"-"` + HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` + + // The URL scheme to use when talking to Alertmanagers. + Scheme string `yaml:"scheme,omitempty"` + // Path prefix to add in front of the push endpoint path. + PathPrefix string `yaml:"path_prefix,omitempty"` + // The timeout used when sending alerts. + Timeout model.Duration `yaml:"timeout,omitempty"` + + // The api version of Alertmanager. + APIVersion AlertmanagerAPIVersion `yaml:"api_version"` + + // List of Alertmanager relabel configurations. + RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *AlertmanagerConfig) SetDirectory(dir string) { + c.ServiceDiscoveryConfigs.SetDirectory(dir) + c.HTTPClientConfig.SetDirectory(dir) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultAlertmanagerConfig + if err := discovery.UnmarshalYAMLWithInlineConfigs(c, unmarshal); err != nil { + return err + } + + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. + // Thus we just do its validation here. + if err := c.HTTPClientConfig.Validate(); err != nil { + return err + } + + // Check for users putting URLs in target groups. + if len(c.RelabelConfigs) == 0 { + if err := checkStaticTargets(c.ServiceDiscoveryConfigs); err != nil { + return err + } + } + + for _, rlcfg := range c.RelabelConfigs { + if rlcfg == nil { + return errors.New("empty or null Alertmanager target relabeling rule") + } + } + + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (c *AlertmanagerConfig) MarshalYAML() (interface{}, error) { + return discovery.MarshalYAMLWithInlineConfigs(c) +} + +func checkStaticTargets(configs discovery.Configs) error { + for _, cfg := range configs { + sc, ok := cfg.(discovery.StaticConfig) + if !ok { + continue + } + for _, tg := range sc { + for _, t := range tg.Targets { + if err := CheckTargetAddress(t[model.AddressLabel]); err != nil { + return err + } + } + } + } + return nil +} + +// CheckTargetAddress checks if target address is valid. 
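+// A value containing "/" is rejected, since targets must be host:port
+// addresses rather than URLs.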
+func CheckTargetAddress(address model.LabelValue) error { + // For now check for a URL, we may want to expand this later. + if strings.Contains(string(address), "/") { + return errors.Errorf("%q is not a valid hostname", address) + } + return nil +} + +// RemoteWriteConfig is the configuration for writing to remote storage. +type RemoteWriteConfig struct { + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` + Name string `yaml:"name,omitempty"` + SendExemplars bool `yaml:"send_exemplars,omitempty"` + + // We cannot do proper Go type embedding below as the parser will then parse + // values arbitrarily into the overflow maps of further-down types. + HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` + QueueConfig QueueConfig `yaml:"queue_config,omitempty"` + MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` + SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` +} + +// SetDirectory joins any relative file paths with dir. +func (c *RemoteWriteConfig) SetDirectory(dir string) { + c.HTTPClientConfig.SetDirectory(dir) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRemoteWriteConfig + type plain RemoteWriteConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if c.URL == nil { + return errors.New("url for remote_write is empty") + } + for _, rlcfg := range c.WriteRelabelConfigs { + if rlcfg == nil { + return errors.New("empty or null relabeling rule in remote write config") + } + } + if err := validateHeaders(c.Headers); err != nil { + return err + } + + // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer. + // We cannot make it a pointer as the parser panics for inlined pointer structs. + // Thus we just do its validation here. + if err := c.HTTPClientConfig.Validate(); err != nil { + return err + } + + httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || + c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil + + if httpClientConfigAuthEnabled && c.SigV4Config != nil { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + } + + return nil +} + +func validateHeaders(headers map[string]string) error { + for header := range headers { + if strings.ToLower(header) == "authorization" { + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter") + } + if _, ok := reservedHeaders[strings.ToLower(header)]; ok { + return errors.Errorf("%s is a reserved header. It must not be changed", header) + } + } + return nil +} + +// QueueConfig is the configuration for the queue used to write to remote +// storage. +type QueueConfig struct { + // Number of samples to buffer per shard before we block. Defaults to + // MaxSamplesPerSend. + Capacity int `yaml:"capacity,omitempty"` + + // Max number of shards, i.e. amount of concurrency. + MaxShards int `yaml:"max_shards,omitempty"` + + // Min number of shards, i.e. amount of concurrency. + MinShards int `yaml:"min_shards,omitempty"` + + // Maximum number of samples per send. + MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"` + + // Maximum time sample will wait in buffer. 
+	BatchSendDeadline model.Duration `yaml:"batch_send_deadline,omitempty"`
+
+	// On recoverable errors, backoff exponentially.
+	MinBackoff       model.Duration `yaml:"min_backoff,omitempty"`
+	MaxBackoff       model.Duration `yaml:"max_backoff,omitempty"`
+	RetryOnRateLimit bool           `yaml:"retry_on_http_429,omitempty"`
+}
+
+// MetadataConfig is the configuration for sending metadata to remote
+// storage.
+type MetadataConfig struct {
+	// Send controls whether we send metric metadata to remote storage.
+	Send bool `yaml:"send"`
+	// SendInterval controls how frequently we send metric metadata.
+	SendInterval model.Duration `yaml:"send_interval"`
+	// Maximum number of samples per send.
+	MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"`
+}
+
+// RemoteReadConfig is the configuration for reading from remote storage.
+type RemoteReadConfig struct {
+	URL           *config.URL       `yaml:"url"`
+	RemoteTimeout model.Duration    `yaml:"remote_timeout,omitempty"`
+	Headers       map[string]string `yaml:"headers,omitempty"`
+	ReadRecent    bool              `yaml:"read_recent,omitempty"`
+	Name          string            `yaml:"name,omitempty"`
+
+	// We cannot do proper Go type embedding below as the parser will then parse
+	// values arbitrarily into the overflow maps of further-down types.
+	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
+
+	// RequiredMatchers is an optional list of equality matchers which have to
+	// be present in a selector to query the remote read endpoint.
+	RequiredMatchers model.LabelSet `yaml:"required_matchers,omitempty"`
+}
+
+// SetDirectory joins any relative file paths with dir.
+func (c *RemoteReadConfig) SetDirectory(dir string) {
+	c.HTTPClientConfig.SetDirectory(dir)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *RemoteReadConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultRemoteReadConfig
+	type plain RemoteReadConfig
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+	if c.URL == nil {
+		return errors.New("url for remote_read is empty")
+	}
+	if err := validateHeaders(c.Headers); err != nil {
+		return err
+	}
+	// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
+	// We cannot make it a pointer as the parser panics for inlined pointer structs.
+	// Thus we just do its validation here.
+	return c.HTTPClientConfig.Validate()
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md
new file mode 100644
index 00000000000..8854981a041
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/README.md
@@ -0,0 +1,267 @@
+# Service Discovery
+
+This directory contains the service discovery (SD) component of Prometheus.
+
+## Design of a Prometheus SD
+
+There are many requests to add new SDs to Prometheus; this section looks at
+what makes a good SD and covers some of the common implementation issues.
+
+### Does this make sense as an SD?
+
+The first question to ask is whether it makes sense to add this particular
+SD. An SD mechanism should be reasonably well established, and at a minimum in
+use across multiple organizations. It should allow the discovery of machines
+and/or services running somewhere. When exactly an SD is popular enough to
+justify being added to Prometheus natively is an open question.
+
+Note: As part of lifting the past moratorium on new SD implementations it was
+agreed that, in addition to the existing requirements, new service discovery
+implementations will be required to have a committed maintainer with push
+access (i.e., a member of the Prometheus team).
+
+It should not be a brand new SD mechanism, or a variant of an established
+mechanism. We want to integrate Prometheus with the SD that's already there in
+your infrastructure, not invent yet more ways to do service discovery. We also
+do not add mechanisms to work around users lacking service discovery and/or
+configuration management infrastructure.
+
+SDs that merely discover other applications running the same software (e.g.
+talk to one Kafka or Cassandra server to find the others) are not service
+discovery. In that case the SD you should be looking at is whatever decides
+that a machine is going to be a Kafka server, likely a machine database or
+configuration management system.
+
+If something is particularly custom or unusual, `file_sd` is the generic
+mechanism provided for users to hook in. Generally with Prometheus we offer a
+single generic mechanism for things with infinite variations, rather than
+trying to support everything natively (see also, alertmanager webhook, remote
+read, remote write, node exporter textfile collector). For example anything
+that would involve talking to a relational database should use `file_sd`
+instead.
+
+For configuration management systems like Chef, while they do have a
+database/API that'd in principle make sense to talk to for service discovery,
+the idiomatic approach is to use Chef's templating facilities to write out a
+file for use with `file_sd`.
+
+
+### Mapping from SD to Prometheus
+
+The general principle with SD is to extract all the potentially useful
+information we can out of the SD, and let the user choose what they need of it
+using
+[relabelling](https://prometheus.io/docs/operating/configuration/#).
+This information is generally termed metadata.
+
+Metadata is exposed as a set of key/value pairs (labels) per target. The keys
+are prefixed with `__meta_<sdname>_`, and there should also be an `__address__`
+label with the host:port of the target (preferably an IP address to avoid DNS
+lookups). No other labelnames should be exposed.
+
+It is very common for initial pull requests for new SDs to include hardcoded
+assumptions that make sense for the author's setup. SDs should be generic;
+any customisation should be handled via relabelling. There should be basically
+no business logic, filtering, or transformations of the data from the SD beyond
+that which is needed to fit it into the metadata data model.
+
+Arrays (e.g. a list of tags) should be converted to a single label with the
+array values joined with a comma. Also prefix and suffix the value with a
+comma. So for example the array `[a, b, c]` would become `,a,b,c,`. As
+relabelling regexes are fully anchored, this makes it easier to write correct
+regexes against (`.*,a,.*` works no matter where `a` appears in the list). The
+canonical example of this is `__meta_consul_tags`.
+
+Maps, hashes and other forms of key/value pairs should all be prefixed and
+exposed as labels. For example for EC2 tags, there would be
+`__meta_ec2_tag_Description=mydescription` for the Description tag. Label names
+may only contain `[_a-zA-Z0-9]`; sanitize by replacing with underscores as needed.
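+
+A rough sketch of both conventions, assuming the standard `strings` and
+`regexp` packages (the helper names are illustrative, not part of any
+Prometheus package):
+
+```go
+// tagsToLabelValue joins tag values with surrounding commas so fully
+// anchored relabel regexes such as `.*,a,.*` can match a single entry
+// anywhere in the list: [a, b, c] -> ",a,b,c,".
+func tagsToLabelValue(tags []string) string {
+	return "," + strings.Join(tags, ",") + ","
+}
+
+// invalidLabelChar matches every character outside [_a-zA-Z0-9].
+var invalidLabelChar = regexp.MustCompile(`[^_a-zA-Z0-9]`)
+
+// sanitizeLabelName replaces invalid characters with underscores, so an
+// EC2 tag key like "my-tag" becomes "my_tag" before prefixing.
+func sanitizeLabelName(name string) string {
+	return invalidLabelChar.ReplaceAllString(name, "_")
+}
+```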
+
+For targets with multiple potential ports, you can a) expose them as a list,
+b) if they're named, expose them as a map, or c) expose each of them as its own
+target. Kubernetes SD takes the target-per-port approach. a) and b) can be
+combined.
+
+For machine-like SDs (OpenStack, EC2, Kubernetes to some extent) there may
+be multiple network interfaces for a target. Thus far reporting the details
+of only the first/primary network interface has sufficed.
+
+
+### Other implementation considerations
+
+SDs are intended to dump all possible targets. For example, the optional use of
+EC2 service discovery would be to take the entire region's worth of EC2
+instances it provides and do everything needed in one `scrape_config`. For
+large deployments where you are only interested in a small proportion of the
+returned targets, this may cause performance issues. If this occurs, it is
+acceptable to also offer filtering via whatever mechanisms the SD exposes. For
+EC2 that would be the `Filter` option on `DescribeInstances`. Keep in mind that
+this is a performance optimisation; it should be possible to do the same
+filtering using relabelling alone. As with SD generally, we do not invent new
+ways to filter targets (that is what relabelling is for), merely offer up
+whatever functionality the SD itself offers.
+
+It is a general rule with Prometheus that all configuration comes from the
+configuration file. While the libraries you use to talk to the SD may also
+offer other mechanisms for providing configuration/authentication under the
+covers (EC2's use of environment variables being a prime example), using your SD
+mechanism should not require this. Put another way, your SD implementation
+should not read environment variables or files to obtain configuration.
+
+Some SD mechanisms have rate limits that make them challenging to use. As an
+example, we have unfortunately had to reject Amazon ECS service discovery due to
+the rate limits being so low that it would not be usable for anything beyond
+small setups.
+
+If a system offers multiple distinct types of SD, select which is in use with a
+configuration option rather than returning them all from one mega SD that
+requires relabelling to select just the one you want. So far we have only seen
+this with Kubernetes. When a single SD with a selector vs. multiple distinct
+SDs makes sense is an open question.
+
+If there is a failure while talking to the SD, abort rather than
+returning partial data. It is better to work from stale targets than partial
+or incorrect metadata.
+
+The information obtained from service discovery is not considered sensitive
+security-wise. Do not return secrets in metadata; anyone with access to
+the Prometheus server will be able to see them.
+
+
+## Writing an SD mechanism
+
+### The SD interface
+
+A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/discovery/targetgroup#Group). The SD mechanism sends the targets down to Prometheus as a list of target groups.
+
+An SD mechanism has to implement the `Discoverer` interface:
+```go
+type Discoverer interface {
+	Run(ctx context.Context, up chan<- []*targetgroup.Group)
+}
+```
+
+Prometheus will call the `Run()` method on a provider to initialize the discovery mechanism. The mechanism will then send *all* the target groups into the channel.
+Now the mechanism will watch for changes. For each update it can send all target groups, or only changed and new target groups, down the channel. `Manager` will handle
+both cases.
+
+For example, if we had a discovery mechanism that retrieves the following groups:
+
+```go
+[]targetgroup.Group{
+	{
+		Targets: []model.LabelSet{
+			{
+				"__instance__": "10.11.150.1:7870",
+				"hostname":     "demo-target-1",
+				"test":         "simple-test",
+			},
+			{
+				"__instance__": "10.11.150.4:7870",
+				"hostname":     "demo-target-2",
+				"test":         "simple-test",
+			},
+		},
+		Labels: model.LabelSet{
+			"job": "mysql",
+		},
+		Source: "file1",
+	},
+	{
+		Targets: []model.LabelSet{
+			{
+				"__instance__": "10.11.122.11:6001",
+				"hostname":     "demo-postgres-1",
+				"test":         "simple-test",
+			},
+			{
+				"__instance__": "10.11.122.15:6001",
+				"hostname":     "demo-postgres-2",
+				"test":         "simple-test",
+			},
+		},
+		Labels: model.LabelSet{
+			"job": "postgres",
+		},
+		Source: "file2",
+	},
+}
+```
+
+Here there are two target groups: one group with source `file1` and another with `file2`. The grouping is implementation-specific and could even be one target per group. But one has to make sure that every target group sent by an SD instance has a `Source` which is unique across all the target groups of that SD instance.
+
+In this case, both the target groups are sent down the channel the first time `Run()` is called. Now, for an update, we need to send the whole _changed_ target group down the channel, i.e., if the target with `hostname: demo-postgres-2` goes away, we send:
+```go
+&targetgroup.Group{
+	Targets: []model.LabelSet{
+		{
+			"__instance__": "10.11.122.11:6001",
+			"hostname":     "demo-postgres-1",
+			"test":         "simple-test",
+		},
+	},
+	Labels: model.LabelSet{
+		"job": "postgres",
+	},
+	Source: "file2",
+}
+```
+down the channel.
+
+If all the targets in a group go away, we need to send the target group with empty `Targets` down the channel, i.e., if all targets with `job: postgres` go away, we send:
+```go
+&targetgroup.Group{
+	Targets: nil,
+	Source:  "file2",
+}
+```
+down the channel.
+
+### The Config interface
+
+Now that your service discovery mechanism is ready to discover targets, you must help
+Prometheus discover it. This is done by implementing the `discovery.Config` interface
+and registering it with `discovery.RegisterConfig` in an init function of your package.
+
+```go
+type Config interface {
+	// Name returns the name of the discovery mechanism.
+	Name() string
+
+	// NewDiscoverer returns a Discoverer for the Config
+	// with the given DiscovererOptions.
+	NewDiscoverer(DiscovererOptions) (Discoverer, error)
+}
+
+type DiscovererOptions struct {
+	Logger log.Logger
+}
+```
+
+The value returned by `Name()` should be short, descriptive, lowercase, and unique.
+It's used to tag the provided `Logger` and as part of the YAML key for your SD
+mechanism's list of configs in `scrape_config` and `alertmanager_config`
+(e.g. `${NAME}_sd_configs`).
+
+### New Service Discovery Check List
+
+Here are some non-obvious parts of adding service discoveries that need to be verified:
+
+- Validate that discovery configs can be DeepEqualled by adding them to
+  `config/testdata/conf.good.yml` and to the associated tests.
+
+- If the config contains file paths directly or indirectly (e.g. with a TLSConfig or
+  HTTPClientConfig field), then it must implement `config.DirectorySetter`.
+
+- Import your SD package from `prometheus/discovery/install`. The install package is
+  imported from `main` to register all builtin SD mechanisms.
+
+- List the service discovery in both `<scrape_config>` and
+  `<alertmanager_config>` in `docs/configuration/configuration.md`.
+
+
+
+### Examples of Service Discovery pull requests
+
+The examples given might become out of date but should give a good impression of the areas touched by a new service discovery.
+
+- [Eureka](https://github.com/prometheus/prometheus/pull/3369)
diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go
new file mode 100644
index 00000000000..f2b87d99255
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go
@@ -0,0 +1,117 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+	"context"
+	"reflect"
+
+	"github.com/go-kit/log"
+	"github.com/prometheus/common/config"
+
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+// Discoverer provides information about target groups. It maintains a set
+// of sources from which TargetGroups can originate. Whenever a discovery provider
+// detects a potential change, it sends the TargetGroup through its channel.
+//
+// Discoverer does not know if an actual change happened.
+// It does guarantee that it sends the new TargetGroup whenever a change happens.
+//
+// Discoverers should initially send a full set of all discoverable TargetGroups.
+type Discoverer interface {
+	// Run hands a channel to the discovery provider (Consul, DNS, etc.) through which
+	// it can send updated target groups. It must return when the context is canceled.
+	// It should not close the update channel on returning.
+	Run(ctx context.Context, up chan<- []*targetgroup.Group)
+}
+
+// DiscovererOptions provides options for a Discoverer.
+type DiscovererOptions struct {
+	Logger log.Logger
+}
+
+// A Config provides the configuration and constructor for a Discoverer.
+type Config interface {
+	// Name returns the name of the discovery mechanism.
+	Name() string
+
+	// NewDiscoverer returns a Discoverer for the Config
+	// with the given DiscovererOptions.
+	NewDiscoverer(DiscovererOptions) (Discoverer, error)
+}
+
+// Configs is a slice of Config values that uses custom YAML marshaling and unmarshaling
+// to represent itself as a mapping of the Config values grouped by their types.
+type Configs []Config
+
+// SetDirectory joins any relative file paths with dir.
+func (c *Configs) SetDirectory(dir string) {
+	for _, c := range *c {
+		if v, ok := c.(config.DirectorySetter); ok {
+			v.SetDirectory(dir)
+		}
+	}
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	cfgTyp := getConfigType(configsType)
+	cfgPtr := reflect.New(cfgTyp)
+	cfgVal := cfgPtr.Elem()
+
+	if err := unmarshal(cfgPtr.Interface()); err != nil {
+		return replaceYAMLTypeError(err, cfgTyp, configsType)
+	}
+
+	var err error
+	*c, err = readConfigs(cfgVal, 0)
+	return err
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (c Configs) MarshalYAML() (interface{}, error) { + cfgTyp := getConfigType(configsType) + cfgPtr := reflect.New(cfgTyp) + cfgVal := cfgPtr.Elem() + + if err := writeConfigs(cfgVal, c); err != nil { + return nil, err + } + + return cfgPtr.Interface(), nil +} + +// A StaticConfig is a Config that provides a static list of targets. +type StaticConfig []*targetgroup.Group + +// Name returns the name of the service discovery mechanism. +func (StaticConfig) Name() string { return "static" } + +// NewDiscoverer returns a Discoverer for the Config. +func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { + return staticDiscoverer(c), nil +} + +type staticDiscoverer []*targetgroup.Group + +func (c staticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) { + // TODO: existing implementation closes up chan, but documentation explicitly forbids it...? + defer close(up) + select { + case <-ctx.Done(): + case up <- c: + } +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go new file mode 100644 index 00000000000..e10cfc7bd3b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -0,0 +1,453 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +var ( + failedConfigs = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_failed_configs", + Help: "Current number of service discovery configurations that failed to load.", + }, + []string{"name"}, + ) + discoveredTargets = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_sd_discovered_targets", + Help: "Current number of discovered targets.", + }, + []string{"name", "config"}, + ) + receivedUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_received_updates_total", + Help: "Total number of update events received from the SD providers.", + }, + []string{"name"}, + ) + delayedUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_delayed_total", + Help: "Total number of update events that couldn't be sent immediately.", + }, + []string{"name"}, + ) + sentUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_sd_updates_total", + Help: "Total number of update events sent to the SD consumers.", + }, + []string{"name"}, + ) +) + +func RegisterMetrics() { + prometheus.MustRegister(failedConfigs, discoveredTargets, receivedUpdates, delayedUpdates, sentUpdates) +} + +type poolKey struct { + setName string + provider string +} + +// provider holds a Discoverer instance, its configuration, cancel func and its subscribers. 
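+// Subscribers are the names of the target sets (in practice, scrape job
+// names) that consume this provider's target groups; they key the
+// Manager's target pools.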
+type provider struct {
+	name   string
+	d      Discoverer
+	config interface{}
+
+	cancel context.CancelFunc
+	// done should be called after cleaning up resources associated with the
+	// cancelled provider.
+	done func()
+
+	mu   sync.RWMutex
+	subs map[string]struct{}
+
+	// newSubs is used to temporarily store subs to be used upon config reload completion.
+	newSubs map[string]struct{}
+}
+
+// IsStarted returns true if the Discoverer has been started.
+func (p *provider) IsStarted() bool {
+	return p.cancel != nil
+}
+
+// NewManager is the Discovery Manager constructor.
+func NewManager(ctx context.Context, logger log.Logger, options ...func(*Manager)) *Manager {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	mgr := &Manager{
+		logger:      logger,
+		syncCh:      make(chan map[string][]*targetgroup.Group),
+		targets:     make(map[poolKey]map[string]*targetgroup.Group),
+		ctx:         ctx,
+		updatert:    5 * time.Second,
+		triggerSend: make(chan struct{}, 1),
+	}
+	for _, option := range options {
+		option(mgr)
+	}
+	return mgr
+}
+
+// Name sets the name of the manager.
+func Name(n string) func(*Manager) {
+	return func(m *Manager) {
+		m.mtx.Lock()
+		defer m.mtx.Unlock()
+		m.name = n
+	}
+}
+
+// Manager maintains a set of discovery providers and sends each update to a map channel.
+// Targets are grouped by the target set name.
+type Manager struct {
+	logger log.Logger
+	name   string
+	mtx    sync.RWMutex
+	ctx    context.Context
+
+	// Some Discoverers (e.g. k8s) send only the updates for a given target group,
+	// so we use map[tg.Source]*targetgroup.Group to know which group to update.
+	targets    map[poolKey]map[string]*targetgroup.Group
+	targetsMtx sync.Mutex
+
+	// providers keeps track of SD providers.
+	providers []*provider
+	// The sync channel sends the updates as a map where the key is the job value from the scrape config.
+	syncCh chan map[string][]*targetgroup.Group
+
+	// How long to wait before sending updates to the channel. The variable
+	// should only be modified in unit tests.
+	updatert time.Duration
+
+	// The triggerSend channel signals to the Manager that new updates have been received from providers.
+	triggerSend chan struct{}
+
+	// lastProvider counts providers registered during Manager's lifetime.
+	lastProvider uint
+}
+
+// Run starts the background processing.
+func (m *Manager) Run() error {
+	go m.sender()
+	// Block until the context is cancelled, then stop all discoverers.
+	<-m.ctx.Done()
+	m.cancelDiscoverers()
+	return m.ctx.Err()
+}
+
+// SyncCh returns a read only channel used by all the clients to receive target updates.
+func (m *Manager) SyncCh() <-chan map[string][]*targetgroup.Group {
+	return m.syncCh
+}
+
+// ApplyConfig checks whether a discovery provider with the supplied config is
+// already running and keeps it as is. Remaining providers are then stopped and
+// new required providers are started using the provided config.
+func (m *Manager) ApplyConfig(cfg map[string]Configs) error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	var failedCount int
+	for name, scfg := range cfg {
+		failedCount += m.registerProviders(scfg, name)
+	}
+	failedConfigs.WithLabelValues(m.name).Set(float64(failedCount))
+
+	var (
+		wg sync.WaitGroup
+		// keep shows if we keep any providers after reload.
+		keep         bool
+		newProviders []*provider
+	)
+	for _, prov := range m.providers {
+		// Cancel obsolete providers.
+ if len(prov.newSubs) == 0 { + wg.Add(1) + prov.done = func() { + wg.Done() + } + prov.cancel() + continue + } + newProviders = append(newProviders, prov) + // refTargets keeps reference targets used to populate new subs' targets + var refTargets map[string]*targetgroup.Group + prov.mu.Lock() + + m.targetsMtx.Lock() + for s := range prov.subs { + keep = true + refTargets = m.targets[poolKey{s, prov.name}] + // Remove obsolete subs' targets. + if _, ok := prov.newSubs[s]; !ok { + delete(m.targets, poolKey{s, prov.name}) + discoveredTargets.DeleteLabelValues(m.name, s) + } + } + // Set metrics and targets for new subs. + for s := range prov.newSubs { + if _, ok := prov.subs[s]; !ok { + discoveredTargets.WithLabelValues(m.name, s).Set(0) + } + if l := len(refTargets); l > 0 { + m.targets[poolKey{s, prov.name}] = make(map[string]*targetgroup.Group, l) + for k, v := range refTargets { + m.targets[poolKey{s, prov.name}][k] = v + } + } + } + m.targetsMtx.Unlock() + + prov.subs = prov.newSubs + prov.newSubs = map[string]struct{}{} + prov.mu.Unlock() + if !prov.IsStarted() { + m.startProvider(m.ctx, prov) + } + } + // Currently downstream managers expect full target state upon config reload, so we must oblige. + // While startProvider does pull the trigger, it may take some time to do so, therefore + // we pull the trigger as soon as possible so that downstream managers can populate their state. + // See https://github.com/prometheus/prometheus/pull/8639 for details. + if keep { + select { + case m.triggerSend <- struct{}{}: + default: + } + } + m.providers = newProviders + wg.Wait() + + return nil +} + +// StartCustomProvider is used for sdtool. Only use this if you know what you're doing. +func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker Discoverer) { + p := &provider{ + name: name, + d: worker, + subs: map[string]struct{}{ + name: {}, + }, + } + m.providers = append(m.providers, p) + m.startProvider(ctx, p) +} + +func (m *Manager) startProvider(ctx context.Context, p *provider) { + level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + ctx, cancel := context.WithCancel(ctx) + updates := make(chan []*targetgroup.Group) + + p.cancel = cancel + + go p.d.Run(ctx, updates) + go m.updater(ctx, p, updates) +} + +// cleaner cleans resources associated with provider. +func (m *Manager) cleaner(p *provider) { + m.targetsMtx.Lock() + p.mu.RLock() + for s := range p.subs { + delete(m.targets, poolKey{s, p.name}) + } + p.mu.RUnlock() + m.targetsMtx.Unlock() + if p.done != nil { + p.done() + } +} + +func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targetgroup.Group) { + // Ensure targets from this provider are cleaned up. + defer m.cleaner(p) + for { + select { + case <-ctx.Done(): + return + case tgs, ok := <-updates: + receivedUpdates.WithLabelValues(m.name).Inc() + if !ok { + level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + // Wait for provider cancellation to ensure targets are cleaned up when expected. 
+				<-ctx.Done()
+				return
+			}
+
+			p.mu.RLock()
+			for s := range p.subs {
+				m.updateGroup(poolKey{setName: s, provider: p.name}, tgs)
+			}
+			p.mu.RUnlock()
+
+			select {
+			case m.triggerSend <- struct{}{}:
+			default:
+			}
+		}
+	}
+}
+
+func (m *Manager) sender() {
+	ticker := time.NewTicker(m.updatert)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-m.ctx.Done():
+			return
+		case <-ticker.C: // Some discoverers send updates too often, so we throttle these with the ticker.
+			select {
+			case <-m.triggerSend:
+				sentUpdates.WithLabelValues(m.name).Inc()
+				select {
+				case m.syncCh <- m.allGroups():
+				default:
+					delayedUpdates.WithLabelValues(m.name).Inc()
+					level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
+					select {
+					case m.triggerSend <- struct{}{}:
+					default:
+					}
+				}
+			default:
+			}
+		}
+	}
+}
+
+func (m *Manager) cancelDiscoverers() {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+	for _, p := range m.providers {
+		if p.cancel != nil {
+			p.cancel()
+		}
+	}
+}
+
+func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) {
+	m.targetsMtx.Lock()
+	defer m.targetsMtx.Unlock()
+
+	if _, ok := m.targets[poolKey]; !ok {
+		m.targets[poolKey] = make(map[string]*targetgroup.Group)
+	}
+	for _, tg := range tgs {
+		if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics.
+			m.targets[poolKey][tg.Source] = tg
+		}
+	}
+}
+
+func (m *Manager) allGroups() map[string][]*targetgroup.Group {
+	tSets := map[string][]*targetgroup.Group{}
+	n := map[string]int{}
+
+	m.targetsMtx.Lock()
+	defer m.targetsMtx.Unlock()
+	for pkey, tsets := range m.targets {
+		for _, tg := range tsets {
+			// Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager'
+			// to signal that it needs to stop all scrape loops for this target set.
+			tSets[pkey.setName] = append(tSets[pkey.setName], tg)
+			n[pkey.setName] += len(tg.Targets)
+		}
+	}
+	for setName, v := range n {
+		discoveredTargets.WithLabelValues(m.name, setName).Set(float64(v))
+	}
+	return tSets
+}
+
+// registerProviders returns the number of failed SD configs.
+func (m *Manager) registerProviders(cfgs Configs, setName string) int {
+	var (
+		failed int
+		added  bool
+	)
+	add := func(cfg Config) {
+		for _, p := range m.providers {
+			if reflect.DeepEqual(cfg, p.config) {
+				p.newSubs[setName] = struct{}{}
+				added = true
+				return
+			}
+		}
+		typ := cfg.Name()
+		d, err := cfg.NewDiscoverer(DiscovererOptions{
+			Logger: log.With(m.logger, "discovery", typ),
+		})
+		if err != nil {
+			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ)
+			failed++
+			return
+		}
+		m.providers = append(m.providers, &provider{
+			name:   fmt.Sprintf("%s/%d", typ, m.lastProvider),
+			d:      d,
+			config: cfg,
+			newSubs: map[string]struct{}{
+				setName: {},
+			},
+		})
+		m.lastProvider++
+		added = true
+	}
+	for _, cfg := range cfgs {
+		add(cfg)
+	}
+	if !added {
+		// Add an empty target group to force the refresh of the corresponding
+		// scrape pool and to notify the receiver that this target set has no
+		// current targets.
+		// It can happen because the combined set of SD configurations is empty
+		// or because we fail to instantiate all the SD configurations.
+		add(StaticConfig{{}})
+	}
+	return failed
+}
+
+// StaticProvider holds a list of target groups that never change.
+type StaticProvider struct {
+	TargetGroups []*targetgroup.Group
+}
+
+// Run implements the Worker interface.
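+// It sends the configured target groups exactly once, unless the consumer's
+// context is canceled first, and then closes the channel.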
+func (sd *StaticProvider) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { + // We still have to consider that the consumer exits right away in which case + // the context will be canceled. + select { + case ch <- sd.TargetGroups: + case <-ctx.Done(): + } + close(ch) +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go new file mode 100644 index 00000000000..2ebb36cb290 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/discovery/registry.go @@ -0,0 +1,258 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package discovery + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery/targetgroup" +) + +const ( + configFieldPrefix = "AUTO_DISCOVERY_" + staticConfigsKey = "static_configs" + staticConfigsFieldName = configFieldPrefix + staticConfigsKey +) + +var ( + configNames = make(map[string]Config) + configFieldNames = make(map[reflect.Type]string) + configFields []reflect.StructField + + configTypesMu sync.Mutex + configTypes = make(map[reflect.Type]reflect.Type) + + emptyStructType = reflect.TypeOf(struct{}{}) + configsType = reflect.TypeOf(Configs{}) +) + +// RegisterConfig registers the given Config type for YAML marshaling and unmarshaling. +func RegisterConfig(config Config) { + registerConfig(config.Name()+"_sd_configs", reflect.TypeOf(config), config) +} + +func init() { + // N.B.: static_configs is the only Config type implemented by default. + // All other types are registered at init by their implementing packages. + elemTyp := reflect.TypeOf(&targetgroup.Group{}) + registerConfig(staticConfigsKey, elemTyp, StaticConfig{}) +} + +func registerConfig(yamlKey string, elemType reflect.Type, config Config) { + name := config.Name() + if _, ok := configNames[name]; ok { + panic(fmt.Sprintf("discovery: Config named %q is already registered", name)) + } + configNames[name] = config + + fieldName := configFieldPrefix + yamlKey // Field must be exported. + configFieldNames[elemType] = fieldName + + // Insert fields in sorted order. + i := sort.Search(len(configFields), func(k int) bool { + return fieldName < configFields[k].Name + }) + configFields = append(configFields, reflect.StructField{}) // Add empty field at end. + copy(configFields[i+1:], configFields[i:]) // Shift fields to the right. + configFields[i] = reflect.StructField{ // Write new field in place. + Name: fieldName, + Type: reflect.SliceOf(elemType), + Tag: reflect.StructTag(`yaml:"` + yamlKey + `,omitempty"`), + } +} + +func getConfigType(out reflect.Type) reflect.Type { + configTypesMu.Lock() + defer configTypesMu.Unlock() + if typ, ok := configTypes[out]; ok { + return typ + } + // Initial exported fields map one-to-one. 
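+	// Unexported fields and the inlined Configs field are replaced below with
+	// zero-sized unexported placeholders so that field indices in the dynamic
+	// type stay aligned with the original struct.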
+ var fields []reflect.StructField + for i, n := 0, out.NumField(); i < n; i++ { + switch field := out.Field(i); { + case field.PkgPath == "" && field.Type != configsType: + fields = append(fields, field) + default: + fields = append(fields, reflect.StructField{ + Name: "_" + field.Name, // Field must be unexported. + PkgPath: out.PkgPath(), + Type: emptyStructType, + }) + } + } + // Append extra config fields on the end. + fields = append(fields, configFields...) + typ := reflect.StructOf(fields) + configTypes[out] = typ + return typ +} + +// UnmarshalYAMLWithInlineConfigs helps implement yaml.Unmarshal for structs +// that have a Configs field that should be inlined. +func UnmarshalYAMLWithInlineConfigs(out interface{}, unmarshal func(interface{}) error) error { + outVal := reflect.ValueOf(out) + if outVal.Kind() != reflect.Ptr { + return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) + } + outVal = outVal.Elem() + if outVal.Kind() != reflect.Struct { + return fmt.Errorf("discovery: can only unmarshal into a struct pointer: %T", out) + } + outTyp := outVal.Type() + + cfgTyp := getConfigType(outTyp) + cfgPtr := reflect.New(cfgTyp) + cfgVal := cfgPtr.Elem() + + // Copy shared fields (defaults) to dynamic value. + var configs *Configs + for i, n := 0, outVal.NumField(); i < n; i++ { + if outTyp.Field(i).Type == configsType { + configs = outVal.Field(i).Addr().Interface().(*Configs) + continue + } + if cfgTyp.Field(i).PkgPath != "" { + continue // Field is unexported: ignore. + } + cfgVal.Field(i).Set(outVal.Field(i)) + } + if configs == nil { + return fmt.Errorf("discovery: Configs field not found in type: %T", out) + } + + // Unmarshal into dynamic value. + if err := unmarshal(cfgPtr.Interface()); err != nil { + return replaceYAMLTypeError(err, cfgTyp, outTyp) + } + + // Copy shared fields from dynamic value. + for i, n := 0, outVal.NumField(); i < n; i++ { + if cfgTyp.Field(i).PkgPath != "" { + continue // Field is unexported: ignore. + } + outVal.Field(i).Set(cfgVal.Field(i)) + } + + var err error + *configs, err = readConfigs(cfgVal, outVal.NumField()) + return err +} + +func readConfigs(structVal reflect.Value, startField int) (Configs, error) { + var ( + configs Configs + targets []*targetgroup.Group + ) + for i, n := startField, structVal.NumField(); i < n; i++ { + field := structVal.Field(i) + if field.Kind() != reflect.Slice { + panic("discovery: internal error: field is not a slice") + } + for k := 0; k < field.Len(); k++ { + val := field.Index(k) + if val.IsZero() || (val.Kind() == reflect.Ptr && val.Elem().IsZero()) { + key := configFieldNames[field.Type().Elem()] + key = strings.TrimPrefix(key, configFieldPrefix) + return nil, fmt.Errorf("empty or null section in %s", key) + } + switch c := val.Interface().(type) { + case *targetgroup.Group: + // Add index to the static config target groups for unique identification + // within scrape pool. + c.Source = strconv.Itoa(len(targets)) + // Coalesce multiple static configs into a single static config. + targets = append(targets, c) + case Config: + configs = append(configs, c) + default: + panic("discovery: internal error: slice element is not a Config") + } + } + } + if len(targets) > 0 { + configs = append(configs, StaticConfig(targets)) + } + return configs, nil +} + +// MarshalYAMLWithInlineConfigs helps implement yaml.Marshal for structs +// that have a Configs field that should be inlined. 
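+// It mirrors UnmarshalYAMLWithInlineConfigs: shared fields are copied into a
+// dynamic struct value and the Configs entries are fanned out into the
+// per-mechanism *_sd_configs fields by writeConfigs.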
+func MarshalYAMLWithInlineConfigs(in interface{}) (interface{}, error) {
+	inVal := reflect.ValueOf(in)
+	for inVal.Kind() == reflect.Ptr {
+		inVal = inVal.Elem()
+	}
+	inTyp := inVal.Type()
+
+	cfgTyp := getConfigType(inTyp)
+	cfgPtr := reflect.New(cfgTyp)
+	cfgVal := cfgPtr.Elem()
+
+	// Copy shared fields to dynamic value.
+	var configs *Configs
+	for i, n := 0, inTyp.NumField(); i < n; i++ {
+		if inTyp.Field(i).Type == configsType {
+			configs = inVal.Field(i).Addr().Interface().(*Configs)
+		}
+		if cfgTyp.Field(i).PkgPath != "" {
+			continue // Field is unexported: ignore.
+		}
+		cfgVal.Field(i).Set(inVal.Field(i))
+	}
+	if configs == nil {
+		return nil, fmt.Errorf("discovery: Configs field not found in type: %T", in)
+	}
+
+	if err := writeConfigs(cfgVal, *configs); err != nil {
+		return nil, err
+	}
+
+	return cfgPtr.Interface(), nil
+}
+
+func writeConfigs(structVal reflect.Value, configs Configs) error {
+	targets := structVal.FieldByName(staticConfigsFieldName).Addr().Interface().(*[]*targetgroup.Group)
+	for _, c := range configs {
+		if sc, ok := c.(StaticConfig); ok {
+			*targets = append(*targets, sc...)
+			continue
+		}
+		fieldName, ok := configFieldNames[reflect.TypeOf(c)]
+		if !ok {
+			return fmt.Errorf("discovery: cannot marshal unregistered Config type: %T", c)
+		}
+		field := structVal.FieldByName(fieldName)
+		field.Set(reflect.Append(field, reflect.ValueOf(c)))
+	}
+	return nil
+}
+
+func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error {
+	if e, ok := err.(*yaml.TypeError); ok {
+		oldStr := oldTyp.String()
+		newStr := newTyp.String()
+		for i, s := range e.Errors {
+			e.Errors[i] = strings.Replace(s, oldStr, newStr, -1)
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go b/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go
new file mode 100644
index 00000000000..d1dfc739315
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/discovery/targetgroup/targetgroup.go
@@ -0,0 +1,93 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package targetgroup
+
+import (
+	"bytes"
+	"encoding/json"
+
+	"github.com/prometheus/common/model"
+)
+
+// Group is a set of targets with a common label set (production, test, staging, etc.).
+type Group struct {
+	// Targets is a list of targets identified by a label set. Each target is
+	// uniquely identifiable in the group by its address label.
+	Targets []model.LabelSet
+	// Labels is a set of labels that is common across all targets in the group.
+	Labels model.LabelSet
+
+	// Source is an identifier that describes a group of targets.
+	Source string
+}
+
+func (tg Group) String() string {
+	return tg.Source
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
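+// Targets are given as plain address strings (typically "host:port") and are
+// converted into label sets keyed by model.AddressLabel.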
+func (tg *Group) UnmarshalYAML(unmarshal func(interface{}) error) error { + g := struct { + Targets []string `yaml:"targets"` + Labels model.LabelSet `yaml:"labels"` + }{} + if err := unmarshal(&g); err != nil { + return err + } + tg.Targets = make([]model.LabelSet, 0, len(g.Targets)) + for _, t := range g.Targets { + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: model.LabelValue(t), + }) + } + tg.Labels = g.Labels + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (tg Group) MarshalYAML() (interface{}, error) { + g := &struct { + Targets []string `yaml:"targets"` + Labels model.LabelSet `yaml:"labels,omitempty"` + }{ + Targets: make([]string, 0, len(tg.Targets)), + Labels: tg.Labels, + } + for _, t := range tg.Targets { + g.Targets = append(g.Targets, string(t[model.AddressLabel])) + } + return g, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (tg *Group) UnmarshalJSON(b []byte) error { + g := struct { + Targets []string `json:"targets"` + Labels model.LabelSet `json:"labels"` + }{} + + dec := json.NewDecoder(bytes.NewReader(b)) + dec.DisallowUnknownFields() + if err := dec.Decode(&g); err != nil { + return err + } + tg.Targets = make([]model.LabelSet, 0, len(g.Targets)) + for _, t := range g.Targets { + tg.Targets = append(tg.Targets, model.LabelSet{ + model.AddressLabel: model.LabelValue(t), + }) + } + tg.Labels = g.Labels + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go new file mode 100644 index 00000000000..db08f3c85fa --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -0,0 +1,270 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package relabel + +import ( + "crypto/md5" + "fmt" + "regexp" + "strings" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/labels" +) + +var ( + relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + + DefaultRelabelConfig = Config{ + Action: Replace, + Separator: ";", + Regex: MustNewRegexp("(.*)"), + Replacement: "$1", + } +) + +// Action is the action to be performed on relabeling. +type Action string + +const ( + // Replace performs a regex replacement. + Replace Action = "replace" + // Keep drops targets for which the input does not match the regex. + Keep Action = "keep" + // Drop drops targets for which the input does match the regex. + Drop Action = "drop" + // HashMod sets a label to the modulus of a hash of labels. + HashMod Action = "hashmod" + // LabelMap copies labels to other labelnames based on a regex. + LabelMap Action = "labelmap" + // LabelDrop drops any label matching the regex. + LabelDrop Action = "labeldrop" + // LabelKeep drops any label not matching the regex. + LabelKeep Action = "labelkeep" +) + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
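+// Action names are matched case-insensitively; unknown actions are rejected.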
+func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + switch act := Action(strings.ToLower(s)); act { + case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep: + *a = act + return nil + } + return errors.Errorf("unknown relabel action %q", s) +} + +// Config is the configuration for relabeling of target label sets. +type Config struct { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` + // Separator is the string between concatenated values from the source labels. + Separator string `yaml:"separator,omitempty"` + // Regex against which the concatenation is matched. + Regex Regexp `yaml:"regex,omitempty"` + // Modulus to take of the hash of concatenated values from the source labels. + Modulus uint64 `yaml:"modulus,omitempty"` + // TargetLabel is the label to which the resulting string is written in a replacement. + // Regexp interpolation is allowed for the replace action. + TargetLabel string `yaml:"target_label,omitempty"` + // Replacement is the regex replacement pattern to be used. + Replacement string `yaml:"replacement,omitempty"` + // Action is the action to be performed for the relabeling. + Action Action `yaml:"action,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRelabelConfig + type plain Config + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if c.Regex.Regexp == nil { + c.Regex = MustNewRegexp("") + } + if c.Action == "" { + return errors.Errorf("relabel action cannot be empty") + } + if c.Modulus == 0 && c.Action == HashMod { + return errors.Errorf("relabel configuration for hashmod requires non-zero modulus") + } + if (c.Action == Replace || c.Action == HashMod) && c.TargetLabel == "" { + return errors.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) + } + if c.Action == Replace && !relabelTarget.MatchString(c.TargetLabel) { + return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { + return errors.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) + } + if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() { + return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + + if c.Action == LabelDrop || c.Action == LabelKeep { + if c.SourceLabels != nil || + c.TargetLabel != DefaultRelabelConfig.TargetLabel || + c.Modulus != DefaultRelabelConfig.Modulus || + c.Separator != DefaultRelabelConfig.Separator || + c.Replacement != DefaultRelabelConfig.Replacement { + return errors.Errorf("%s action requires only 'regex', and no other fields", c.Action) + } + } + + return nil +} + +// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable. +type Regexp struct { + *regexp.Regexp + original string +} + +// NewRegexp creates a new anchored Regexp and returns an error if the +// passed-in regular expression does not compile. +func NewRegexp(s string) (Regexp, error) { + regex, err := regexp.Compile("^(?:" + s + ")$") + return Regexp{ + Regexp: regex, + original: s, + }, err +} + +// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile. 
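+// It is intended for package-level defaults such as DefaultRelabelConfig above.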
+func MustNewRegexp(s string) Regexp { + re, err := NewRegexp(s) + if err != nil { + panic(err) + } + return re +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + r, err := NewRegexp(s) + if err != nil { + return err + } + *re = r + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (re Regexp) MarshalYAML() (interface{}, error) { + if re.original != "" { + return re.original, nil + } + return nil, nil +} + +// Process returns a relabeled copy of the given label set. The relabel configurations +// are applied in order of input. +// If a label set is dropped, nil is returned. +// May return the input labelSet modified. +func Process(labels labels.Labels, cfgs ...*Config) labels.Labels { + for _, cfg := range cfgs { + labels = relabel(labels, cfg) + if labels == nil { + return nil + } + } + return labels +} + +func relabel(lset labels.Labels, cfg *Config) labels.Labels { + values := make([]string, 0, len(cfg.SourceLabels)) + for _, ln := range cfg.SourceLabels { + values = append(values, lset.Get(string(ln))) + } + val := strings.Join(values, cfg.Separator) + + lb := labels.NewBuilder(lset) + + switch cfg.Action { + case Drop: + if cfg.Regex.MatchString(val) { + return nil + } + case Keep: + if !cfg.Regex.MatchString(val) { + return nil + } + case Replace: + indexes := cfg.Regex.FindStringSubmatchIndex(val) + // If there is no match no replacement must take place. + if indexes == nil { + break + } + target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes)) + if !target.IsValid() { + lb.Del(cfg.TargetLabel) + break + } + res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes) + if len(res) == 0 { + lb.Del(cfg.TargetLabel) + break + } + lb.Set(string(target), string(res)) + case HashMod: + mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus + lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) + case LabelMap: + for _, l := range lset { + if cfg.Regex.MatchString(l.Name) { + res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) + lb.Set(res, l.Value) + } + } + case LabelDrop: + for _, l := range lset { + if cfg.Regex.MatchString(l.Name) { + lb.Del(l.Name) + } + } + case LabelKeep: + for _, l := range lset { + if !cfg.Regex.MatchString(l.Name) { + lb.Del(l.Name) + } + } + default: + panic(errors.Errorf("relabel: unknown relabel action type %q", cfg.Action)) + } + + return lb.Labels() +} + +// sum64 sums the md5 hash to an uint64. 
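+// Note that Go defines shifts of 64 or more bits on a uint64 to yield zero, so
+// effectively only the last eight bytes of the hash contribute to the result.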
+func sum64(hash [md5.Size]byte) uint64 {
+	var s uint64
+
+	for i, b := range hash {
+		shift := uint64((md5.Size - i - 1) * 8)
+
+		s |= uint64(b) << shift
+	}
+	return s
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/README.md b/vendor/github.com/prometheus/prometheus/model/textparse/README.md
new file mode 100644
index 00000000000..697966f0975
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/README.md
@@ -0,0 +1,6 @@
+# Making changes to textparse lexers
+In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command for the lexer you changed:
+`golex -o=promlex.l.go promlex.l`
+
+Note that you need golex installed:
+`go get -u modernc.org/golex`
\ No newline at end of file
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go
new file mode 100644
index 00000000000..cff86c0ed8f
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"mime"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+// Parser parses samples from a byte slice in the official Prometheus and
+// OpenMetrics text exposition formats.
+type Parser interface {
+	// Series returns the bytes of the series, the timestamp if set, and the value
+	// of the current sample.
+	Series() ([]byte, *int64, float64)
+
+	// Help returns the metric name and help text in the current entry.
+	// Must only be called after Next returned a help entry.
+	// The returned byte slices become invalid after the next call to Next.
+	Help() ([]byte, []byte)
+
+	// Type returns the metric name and type in the current entry.
+	// Must only be called after Next returned a type entry.
+	// The returned byte slices become invalid after the next call to Next.
+	Type() ([]byte, MetricType)
+
+	// Unit returns the metric name and unit in the current entry.
+	// Must only be called after Next returned a unit entry.
+	// The returned byte slices become invalid after the next call to Next.
+	Unit() ([]byte, []byte)
+
+	// Comment returns the text of the current comment.
+	// Must only be called after Next returned a comment entry.
+	// The returned byte slice becomes invalid after the next call to Next.
+	Comment() []byte
+
+	// Metric writes the labels of the current sample into the passed labels.
+	// It returns the string from which the metric was parsed.
+	Metric(l *labels.Labels) string
+
+	// Exemplar writes the exemplar of the current sample into the passed
+	// exemplar. It returns whether an exemplar exists.
+	Exemplar(l *exemplar.Exemplar) bool
+
+	// Next advances the parser to the next sample. It returns io.EOF if no
+	// more samples could be read, or another error if parsing failed.
+ Next() (Entry, error) +} + +// New returns a new parser of the byte slice. +func New(b []byte, contentType string) Parser { + mediaType, _, err := mime.ParseMediaType(contentType) + if err == nil && mediaType == "application/openmetrics-text" { + return NewOpenMetricsParser(b) + } + return NewPromParser(b) +} + +// Entry represents the type of a parsed entry. +type Entry int + +const ( + EntryInvalid Entry = -1 + EntryType Entry = 0 + EntryHelp Entry = 1 + EntrySeries Entry = 2 + EntryComment Entry = 3 + EntryUnit Entry = 4 +) + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l new file mode 100644 index 00000000000..91e4439423a --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l @@ -0,0 +1,80 @@ +%{ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "fmt" +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. The method is opened before the matching rules block and closed at +// the end of the file. +func (l *openMetricsLexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +%} + +D [0-9] +L [a-zA-Z_] +M [a-zA-Z_:] +C [^\n] +S [ ] + +%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp sExemplar sEValue sETimestamp + +%yyc c +%yyn c = l.next() +%yyt l.state + + +%% + +#{S} l.state = sComment +HELP{S} l.state = sMeta1; return tHelp +TYPE{S} l.state = sMeta1; return tType +UNIT{S} l.state = sMeta1; return tUnit +"EOF"\n? 
l.state = sInit; return tEOFWord +{M}({M}|{D})* l.state = sMeta2; return tMName +{S}{C}*\n l.state = sInit; return tText + +{M}({M}|{D})* l.state = sValue; return tMName +\{ l.state = sLabels; return tBraceOpen +{L}({L}|{D})* return tLName +\} l.state = sValue; return tBraceClose += l.state = sLValue; return tEqual +, return tComma +\"(\\.|[^\\"\n])*\" l.state = sLabels; return tLValue +{S}[^ \n]+ l.state = sTimestamp; return tValue +{S}[^ \n]+ return tTimestamp +\n l.state = sInit; return tLinebreak +{S}#{S}\{ l.state = sExemplar; return tComment + +{L}({L}|{D})* return tLName +\} l.state = sEValue; return tBraceClose += l.state = sEValue; return tEqual +\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue +, return tComma +{S}[^ \n]+ l.state = sETimestamp; return tValue +{S}[^ \n]+ return tTimestamp +\n l.state = sInit; return tLinebreak + +%% + + return tInvalid +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go new file mode 100644 index 00000000000..6093c9f59d5 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricslex.l.go @@ -0,0 +1,762 @@ +// Code generated by golex. DO NOT EDIT. + +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "fmt" +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. The method is opened before the matching rules block and closed at +// the end of the file. 
+func (l *openMetricsLexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +yystate0: + + switch yyt := l.state; yyt { + default: + panic(fmt.Errorf(`invalid start condition %d`, yyt)) + case 0: // start condition: INITIAL + goto yystart1 + case 1: // start condition: sComment + goto yystart5 + case 2: // start condition: sMeta1 + goto yystart25 + case 3: // start condition: sMeta2 + goto yystart27 + case 4: // start condition: sLabels + goto yystart30 + case 5: // start condition: sLValue + goto yystart35 + case 6: // start condition: sValue + goto yystart39 + case 7: // start condition: sTimestamp + goto yystart43 + case 8: // start condition: sExemplar + goto yystart50 + case 9: // start condition: sEValue + goto yystart55 + case 10: // start condition: sETimestamp + goto yystart61 + } + + goto yystate0 // silence unused label error + goto yystate1 // silence unused label error +yystate1: + c = l.next() +yystart1: + switch { + default: + goto yyabort + case c == '#': + goto yystate2 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate4 + } + +yystate2: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate3 + } + +yystate3: + c = l.next() + goto yyrule1 + +yystate4: + c = l.next() + switch { + default: + goto yyrule8 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate4 + } + + goto yystate5 // silence unused label error +yystate5: + c = l.next() +yystart5: + switch { + default: + goto yyabort + case c == 'E': + goto yystate6 + case c == 'H': + goto yystate10 + case c == 'T': + goto yystate15 + case c == 'U': + goto yystate20 + } + +yystate6: + c = l.next() + switch { + default: + goto yyabort + case c == 'O': + goto yystate7 + } + +yystate7: + c = l.next() + switch { + default: + goto yyabort + case c == 'F': + goto yystate8 + } + +yystate8: + c = l.next() + switch { + default: + goto yyrule5 + case c == '\n': + goto yystate9 + } + +yystate9: + c = l.next() + goto yyrule5 + +yystate10: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate11 + } + +yystate11: + c = l.next() + switch { + default: + goto yyabort + case c == 'L': + goto yystate12 + } + +yystate12: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate13 + } + +yystate13: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate14 + } + +yystate14: + c = l.next() + goto yyrule2 + +yystate15: + c = l.next() + switch { + default: + goto yyabort + case c == 'Y': + goto yystate16 + } + +yystate16: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate17 + } + +yystate17: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate18 + } + +yystate18: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate19 + } + +yystate19: + c = l.next() + goto yyrule3 + +yystate20: + c = l.next() + switch { + default: + goto yyabort + case c == 'N': + goto yystate21 + } + +yystate21: + c = l.next() + switch { + default: + goto yyabort + case c == 'I': + goto yystate22 + } + +yystate22: + c = l.next() + switch { + default: + goto yyabort + case c == 'T': + goto yystate23 + } + +yystate23: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate24 + } + +yystate24: + c = l.next() + goto yyrule4 + + goto yystate25 // silence unused label error +yystate25: + c = l.next() +yystart25: + switch { + 
default: + goto yyabort + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate26 + } + +yystate26: + c = l.next() + switch { + default: + goto yyrule6 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate26 + } + + goto yystate27 // silence unused label error +yystate27: + c = l.next() +yystart27: + switch { + default: + goto yyabort + case c == ' ': + goto yystate28 + } + +yystate28: + c = l.next() + switch { + default: + goto yyabort + case c == '\n': + goto yystate29 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate28 + } + +yystate29: + c = l.next() + goto yyrule7 + + goto yystate30 // silence unused label error +yystate30: + c = l.next() +yystart30: + switch { + default: + goto yyabort + case c == ',': + goto yystate31 + case c == '=': + goto yystate32 + case c == '}': + goto yystate34 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate33 + } + +yystate31: + c = l.next() + goto yyrule13 + +yystate32: + c = l.next() + goto yyrule12 + +yystate33: + c = l.next() + switch { + default: + goto yyrule10 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate33 + } + +yystate34: + c = l.next() + goto yyrule11 + + goto yystate35 // silence unused label error +yystate35: + c = l.next() +yystart35: + switch { + default: + goto yyabort + case c == '"': + goto yystate36 + } + +yystate36: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate37 + case c == '\\': + goto yystate38 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate36 + } + +yystate37: + c = l.next() + goto yyrule14 + +yystate38: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate36 + } + + goto yystate39 // silence unused label error +yystate39: + c = l.next() +yystart39: + switch { + default: + goto yyabort + case c == ' ': + goto yystate40 + case c == '{': + goto yystate42 + } + +yystate40: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate41 + } + +yystate41: + c = l.next() + switch { + default: + goto yyrule15 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate41 + } + +yystate42: + c = l.next() + goto yyrule9 + + goto yystate43 // silence unused label error +yystate43: + c = l.next() +yystart43: + switch { + default: + goto yyabort + case c == ' ': + goto yystate45 + case c == '\n': + goto yystate44 + } + +yystate44: + c = l.next() + goto yyrule17 + +yystate45: + c = l.next() + switch { + default: + goto yyabort + case c == '#': + goto yystate47 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' || c == '"' || c >= '$' && c <= 'ÿ': + goto yystate46 + } + +yystate46: + c = l.next() + switch { + default: + goto yyrule16 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate46 + } + +yystate47: + c = l.next() + switch { + default: + goto yyrule16 + case c == ' ': + goto yystate48 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'ÿ': + goto yystate46 + } + +yystate48: + c = l.next() + switch { + default: + goto yyabort + case c == '{': + goto yystate49 + } + +yystate49: + c = l.next() + goto yyrule18 + + goto yystate50 // silence unused label error +yystate50: + c = l.next() +yystart50: + switch { + default: + goto yyabort + case c == ',': + goto yystate51 + case c == '=': + goto yystate52 + case c == '}': + goto yystate54 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate53 + } + +yystate51: + c = l.next() + goto yyrule23 + +yystate52: + c = l.next() + goto yyrule21 + +yystate53: + c = l.next() + switch { + default: + goto yyrule19 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate53 + } + +yystate54: + c = l.next() + goto yyrule20 + + goto yystate55 // silence unused label error +yystate55: + c = l.next() +yystart55: + switch { + default: + goto yyabort + case c == ' ': + goto yystate56 + case c == '"': + goto yystate58 + } + +yystate56: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate57 + } + +yystate57: + c = l.next() + switch { + default: + goto yyrule24 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate57 + } + +yystate58: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate59 + case c == '\\': + goto yystate60 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate58 + } + +yystate59: + c = l.next() + goto yyrule22 + +yystate60: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate58 + } + + goto yystate61 // silence unused label error +yystate61: + c = l.next() +yystart61: + switch { + default: + goto yyabort + case c == ' ': + goto yystate63 + case c == '\n': + goto yystate62 + } + +yystate62: + c = l.next() + goto yyrule26 + +yystate63: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate64 + } + +yystate64: + c = l.next() + switch { + default: + goto yyrule25 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate64 + } + +yyrule1: // #{S} + { + l.state = sComment + goto yystate0 + } +yyrule2: // HELP{S} + { + l.state = sMeta1 + return tHelp + goto yystate0 + } +yyrule3: // TYPE{S} + { + l.state = sMeta1 + return tType + goto yystate0 + } +yyrule4: // UNIT{S} + { + l.state = sMeta1 + return tUnit + goto yystate0 + } +yyrule5: // "EOF"\n? 
+ { + l.state = sInit + return tEOFWord + goto yystate0 + } +yyrule6: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule7: // {S}{C}*\n + { + l.state = sInit + return tText + goto yystate0 + } +yyrule8: // {M}({M}|{D})* + { + l.state = sValue + return tMName + goto yystate0 + } +yyrule9: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule10: // {L}({L}|{D})* + { + return tLName + } +yyrule11: // \} + { + l.state = sValue + return tBraceClose + goto yystate0 + } +yyrule12: // = + { + l.state = sLValue + return tEqual + goto yystate0 + } +yyrule13: // , + { + return tComma + } +yyrule14: // \"(\\.|[^\\"\n])*\" + { + l.state = sLabels + return tLValue + goto yystate0 + } +yyrule15: // {S}[^ \n]+ + { + l.state = sTimestamp + return tValue + goto yystate0 + } +yyrule16: // {S}[^ \n]+ + { + return tTimestamp + } +yyrule17: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } +yyrule18: // {S}#{S}\{ + { + l.state = sExemplar + return tComment + goto yystate0 + } +yyrule19: // {L}({L}|{D})* + { + return tLName + } +yyrule20: // \} + { + l.state = sEValue + return tBraceClose + goto yystate0 + } +yyrule21: // = + { + l.state = sEValue + return tEqual + goto yystate0 + } +yyrule22: // \"(\\.|[^\\"\n])*\" + { + l.state = sExemplar + return tLValue + goto yystate0 + } +yyrule23: // , + { + return tComma + } +yyrule24: // {S}[^ \n]+ + { + l.state = sETimestamp + return tValue + goto yystate0 + } +yyrule25: // {S}[^ \n]+ + { + return tTimestamp + } +yyrule26: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } + panic("unreachable") + + goto yyabort // silence unused label error + +yyabort: // no lexem recognized + + return tInvalid +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go new file mode 100644 index 00000000000..bbd39e3d554 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -0,0 +1,481 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go get -u modernc.org/golex +//go:generate golex -o=openmetricslex.l.go openmetricslex.l + +package textparse + +import ( + "bytes" + "fmt" + "io" + "math" + "sort" + "strings" + "unicode/utf8" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/value" +) + +var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")} + +type openMetricsLexer struct { + b []byte + i int + start int + err error + state int +} + +// buf returns the buffer of the current token. +func (l *openMetricsLexer) buf() []byte { + return l.b[l.start:l.i] +} + +func (l *openMetricsLexer) cur() byte { + if l.i < len(l.b) { + return l.b[l.i] + } + return byte(' ') +} + +// next advances the openMetricsLexer to the next character. 
+func (l *openMetricsLexer) next() byte { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + // Lex struggles with null bytes. If we are in a label value or help string, where + // they are allowed, consume them here immediately. + for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + } + return l.b[l.i] +} + +func (l *openMetricsLexer) Error(es string) { + l.err = errors.New(es) +} + +// OpenMetricsParser parses samples from a byte slice of samples in the official +// OpenMetrics text exposition format. +// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit +type OpenMetricsParser struct { + l *openMetricsLexer + series []byte + text []byte + mtype MetricType + val float64 + ts int64 + hasTS bool + start int + offsets []int + + eOffsets []int + exemplar []byte + exemplarVal float64 + exemplarTs int64 + hasExemplarTs bool +} + +// NewOpenMetricsParser returns a new parser of the byte slice. +func NewOpenMetricsParser(b []byte) Parser { + return &OpenMetricsParser{l: &openMetricsLexer{b: b}} +} + +// Series returns the bytes of the series, the timestamp if set, and the value +// of the current sample. +func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { + if p.hasTS { + ts := p.ts + return p.series, &ts, p.val + } + return p.series, nil, p.val +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Help() ([]byte, []byte) { + m := p.l.b[p.offsets[0]:p.offsets[1]] + + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + // OpenMetrics always uses the Prometheus format label value escaping. + return m, []byte(lvalReplacer.Replace(string(p.text))) + } + return m, p.text +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Type() ([]byte, MetricType) { + return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype +} + +// Unit returns the metric name and unit in the current entry. +// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Unit() ([]byte, []byte) { + // The Prometheus format does not have units. + return p.l.b[p.offsets[0]:p.offsets[1]], p.text +} + +// Comment returns the text of the current comment. +// Must only be called after Next returned a comment entry. +// The returned byte slice becomes invalid after the next call to Next. +func (p *OpenMetricsParser) Comment() []byte { + return p.text +} + +// Metric writes the labels of the current sample into the passed labels. +// It returns the string from which the metric was parsed. +func (p *OpenMetricsParser) Metric(l *labels.Labels) string { + // Allocate the full immutable string immediately, so we just + // have to create references on it below. 
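+	// p.offsets[0] marks the end of the metric name; after it the offsets come
+	// in quadruples of start/end indexes for each label name and label value.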
+	s := string(p.series)
+
+	*l = append(*l, labels.Label{
+		Name:  labels.MetricName,
+		Value: s[:p.offsets[0]-p.start],
+	})
+
+	for i := 1; i < len(p.offsets); i += 4 {
+		a := p.offsets[i] - p.start
+		b := p.offsets[i+1] - p.start
+		c := p.offsets[i+2] - p.start
+		d := p.offsets[i+3] - p.start
+
+		// Replacer causes allocations. Replace only when necessary.
+		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
+			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
+			continue
+		}
+		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
+	}
+
+	// Sort labels. We can skip the first entry since the metric name is
+	// already at the right place.
+	sort.Sort((*l)[1:])
+
+	return s
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns whether an exemplar exists.
+func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
+	if len(p.exemplar) == 0 {
+		return false
+	}
+
+	// Allocate the full immutable string immediately, so we just
+	// have to create references on it below.
+	s := string(p.exemplar)
+
+	e.Value = p.exemplarVal
+	if p.hasExemplarTs {
+		e.HasTs = true
+		e.Ts = p.exemplarTs
+	}
+
+	for i := 0; i < len(p.eOffsets); i += 4 {
+		a := p.eOffsets[i] - p.start
+		b := p.eOffsets[i+1] - p.start
+		c := p.eOffsets[i+2] - p.start
+		d := p.eOffsets[i+3] - p.start
+
+		e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]})
+	}
+
+	// Sort the labels.
+	sort.Sort(e.Labels)
+
+	return true
+}
+
+// nextToken returns the next token from the openMetricsLexer.
+func (p *OpenMetricsParser) nextToken() token {
+	tok := p.l.Lex()
+	return tok
+}
+
+// Next advances the parser to the next sample. It returns io.EOF if no
+// more samples could be read, or another error if parsing failed.
+func (p *OpenMetricsParser) Next() (Entry, error) { + var err error + + p.start = p.l.i + p.offsets = p.offsets[:0] + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + + switch t := p.nextToken(); t { + case tEOFWord: + if t := p.nextToken(); t != tEOF { + return EntryInvalid, errors.New("unexpected data after # EOF") + } + return EntryInvalid, io.EOF + case tEOF: + return EntryInvalid, errors.New("data does not end with # EOF") + case tHelp, tType, tUnit: + switch t := p.nextToken(); t { + case tMName: + p.offsets = append(p.offsets, p.l.start, p.l.i) + default: + return EntryInvalid, parseError("expected metric name after HELP", t) + } + switch t := p.nextToken(); t { + case tText: + if len(p.l.buf()) > 1 { + p.text = p.l.buf()[1 : len(p.l.buf())-1] + } else { + p.text = []byte{} + } + default: + return EntryInvalid, parseError("expected text in HELP", t) + } + switch t { + case tType: + switch s := yoloString(p.text); s { + case "counter": + p.mtype = MetricTypeCounter + case "gauge": + p.mtype = MetricTypeGauge + case "histogram": + p.mtype = MetricTypeHistogram + case "gaugehistogram": + p.mtype = MetricTypeGaugeHistogram + case "summary": + p.mtype = MetricTypeSummary + case "info": + p.mtype = MetricTypeInfo + case "stateset": + p.mtype = MetricTypeStateset + case "unknown": + p.mtype = MetricTypeUnknown + default: + return EntryInvalid, errors.Errorf("invalid metric type %q", s) + } + case tHelp: + if !utf8.Valid(p.text) { + return EntryInvalid, errors.New("help text is not a valid utf8 string") + } + } + switch t { + case tHelp: + return EntryHelp, nil + case tType: + return EntryType, nil + case tUnit: + m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]]) + u := yoloString(p.text) + if len(u) > 0 { + if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { + return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m) + } + } + return EntryUnit, nil + } + + case tMName: + p.offsets = append(p.offsets, p.l.i) + p.series = p.l.b[p.start:p.l.i] + + t2 := p.nextToken() + if t2 == tBraceOpen { + p.offsets, err = p.parseLVals(p.offsets) + if err != nil { + return EntryInvalid, err + } + p.series = p.l.b[p.start:p.l.i] + t2 = p.nextToken() + } + p.val, err = p.getFloatValue(t2, "metric") + if err != nil { + return EntryInvalid, err + } + + p.hasTS = false + switch t2 := p.nextToken(); t2 { + case tEOF: + return EntryInvalid, errors.New("data does not end with # EOF") + case tLinebreak: + break + case tComment: + if err := p.parseComment(); err != nil { + return EntryInvalid, err + } + case tTimestamp: + p.hasTS = true + var ts float64 + // A float is enough to hold what we need for millisecond resolution. + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { + return EntryInvalid, err + } + if math.IsNaN(ts) || math.IsInf(ts, 0) { + return EntryInvalid, errors.New("invalid timestamp") + } + p.ts = int64(ts * 1000) + switch t3 := p.nextToken(); t3 { + case tLinebreak: + case tComment: + if err := p.parseComment(); err != nil { + return EntryInvalid, err + } + default: + return EntryInvalid, parseError("expected next entry after timestamp", t3) + } + default: + return EntryInvalid, parseError("expected timestamp or # symbol", t2) + } + return EntrySeries, nil + + default: + err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur())) + } + return EntryInvalid, err +} + +func (p *OpenMetricsParser) parseComment() error { + // Validate the name of the metric. 
It must have _total or _bucket as
+	// suffix for exemplars to be supported.
+	if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil {
+		return err
+	}
+
+	var err error
+	// Parse the labels.
+	p.eOffsets, err = p.parseLVals(p.eOffsets)
+	if err != nil {
+		return err
+	}
+	p.exemplar = p.l.b[p.start:p.l.i]
+
+	// Get the value.
+	p.exemplarVal, err = p.getFloatValue(p.nextToken(), "exemplar labels")
+	if err != nil {
+		return err
+	}
+
+	// Read the optional timestamp.
+	p.hasExemplarTs = false
+	switch t2 := p.nextToken(); t2 {
+	case tEOF:
+		return errors.New("data does not end with # EOF")
+	case tLinebreak:
+		break
+	case tTimestamp:
+		p.hasExemplarTs = true
+		var ts float64
+		// A float is enough to hold what we need for millisecond resolution.
+		if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
+			return err
+		}
+		if math.IsNaN(ts) || math.IsInf(ts, 0) {
+			return errors.New("invalid exemplar timestamp")
+		}
+		p.exemplarTs = int64(ts * 1000)
+		switch t3 := p.nextToken(); t3 {
+		case tLinebreak:
+		default:
+			return parseError("expected next entry after exemplar timestamp", t3)
+		}
+	default:
+		return parseError("expected timestamp or comment", t2)
+	}
+	return nil
+}
+
+func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
+	first := true
+	for {
+		t := p.nextToken()
+		switch t {
+		case tBraceClose:
+			return offsets, nil
+		case tComma:
+			if first {
+				return nil, parseError("expected label name or left brace", t)
+			}
+			t = p.nextToken()
+			if t != tLName {
+				return nil, parseError("expected label name", t)
+			}
+		case tLName:
+			if !first {
+				return nil, parseError("expected comma", t)
+			}
+		default:
+			if first {
+				return nil, parseError("expected label name or left brace", t)
+			}
+			return nil, parseError("expected comma or left brace", t)
+
+		}
+		first = false
+		// t is now a label name.
+
+		offsets = append(offsets, p.l.start, p.l.i)
+
+		if t := p.nextToken(); t != tEqual {
+			return nil, parseError("expected equal", t)
+		}
+		if t := p.nextToken(); t != tLValue {
+			return nil, parseError("expected label value", t)
+		}
+		if !utf8.Valid(p.l.buf()) {
+			return nil, errors.New("invalid UTF-8 label value")
+		}
+
+		// The openMetricsLexer ensures the value string is quoted. Strip first
+		// and last character.
+		offsets = append(offsets, p.l.start+1, p.l.i-1)
+	}
+}
+
+func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
+	if t != tValue {
+		return 0, parseError(fmt.Sprintf("expected value after %v", after), t)
+	}
+	val, err := parseFloat(yoloString(p.l.buf()[1:]))
+	if err != nil {
+		return 0, err
+	}
+	// Ensure canonical NaN value.
+	if math.IsNaN(val) {
+		val = math.Float64frombits(value.NormalNaN)
+	}
+	return val, nil
+}
+
+func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error {
+	for _, suffix := range allowedSuffixes {
+		if bytes.HasSuffix(name, suffix) {
+			return nil
+		}
+	}
+	return fmt.Errorf("metric name %v does not support exemplars", string(name))
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l
new file mode 100644
index 00000000000..c3c5c3bb001
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l
@@ -0,0 +1,100 @@
+%{
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+    "fmt"
+)
+
+const (
+    sInit = iota
+    sComment
+    sMeta1
+    sMeta2
+    sLabels
+    sLValue
+    sValue
+    sTimestamp
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *promlexer) Lex() token {
+    if l.i >= len(l.b) {
+        return tEOF
+    }
+    c := l.b[l.i]
+    l.start = l.i
+
+%}
+
+D [0-9]
+L [a-zA-Z_]
+M [a-zA-Z_:]
+C [^\n]
+
+%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp
+
+%yyc c
+%yyn c = l.next()
+%yyt l.state
+
+
+%%
+
+\0 return tEOF
+\n l.state = sInit; return tLinebreak
+<*>[ \t]+ return tWhitespace
+
+#[ \t]+ l.state = sComment
+# return l.consumeComment()
+<sComment>HELP[\t ]+ l.state = sMeta1; return tHelp
+<sComment>TYPE[\t ]+ l.state = sMeta1; return tType
+<sMeta1>{M}({M}|{D})* l.state = sMeta2; return tMName
+<sMeta2>{C}* l.state = sInit; return tText
+
+{M}({M}|{D})* l.state = sValue; return tMName
+<sValue>\{ l.state = sLabels; return tBraceOpen
+<sLabels>{L}({L}|{D})* return tLName
+<sLabels>\} l.state = sValue; return tBraceClose
+<sLabels>= l.state = sLValue; return tEqual
+<sLabels>, return tComma
+<sLValue>\"(\\.|[^\\"])*\" l.state = sLabels; return tLValue
+<sValue>[^{ \t\n]+ l.state = sTimestamp; return tValue
+<sTimestamp>{D}+ return tTimestamp
+<sTimestamp>\n l.state = sInit; return tLinebreak
+
+%%
+    // Workaround to gobble up comments that started with a HELP or TYPE
+    // prefix. We just consume all characters until we reach a newline.
+    // This saves us from adding disproportionate complexity to the parser.
+    if l.state == sComment {
+        return l.consumeComment()
+    }
+    return tInvalid
+}
+
+func (l *promlexer) consumeComment() token {
+    for c := l.cur(); ; c = l.next() {
+        switch c {
+        case 0:
+            return tEOF
+        case '\n':
+            l.state = sInit
+            return tComment
+        }
+    }
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go
new file mode 100644
index 00000000000..690ec4e05bb
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/textparse/promlex.l.go
@@ -0,0 +1,553 @@
+// CAUTION: Generated file - DO NOT EDIT.
+
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+    "github.com/pkg/errors"
+)
+
+const (
+    sInit = iota
+    sComment
+    sMeta1
+    sMeta2
+    sLabels
+    sLValue
+    sValue
+    sTimestamp
+    sExemplar
+    sEValue
+    sETimestamp
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
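+//
+// (The switch on l.state below is the golex-generated dispatch for the %x
+// start conditions declared in promlex.l: each yystartN block implements one
+// start condition, and each yyruleN block implements one pattern/action rule
+// from the .l file.)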
+func (l *promlexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +yystate0: + + switch yyt := l.state; yyt { + default: + panic(errors.Errorf(`invalid start condition %d`, yyt)) + case 0: // start condition: INITIAL + goto yystart1 + case 1: // start condition: sComment + goto yystart8 + case 2: // start condition: sMeta1 + goto yystart19 + case 3: // start condition: sMeta2 + goto yystart21 + case 4: // start condition: sLabels + goto yystart24 + case 5: // start condition: sLValue + goto yystart29 + case 6: // start condition: sValue + goto yystart33 + case 7: // start condition: sTimestamp + goto yystart36 + } + + goto yystate0 // silence unused label error + goto yystate1 // silence unused label error +yystate1: + c = l.next() +yystart1: + switch { + default: + goto yyabort + case c == '#': + goto yystate5 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate7 + case c == '\n': + goto yystate4 + case c == '\t' || c == ' ': + goto yystate3 + case c == '\x00': + goto yystate2 + } + +yystate2: + c = l.next() + goto yyrule1 + +yystate3: + c = l.next() + switch { + default: + goto yyrule3 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate4: + c = l.next() + goto yyrule2 + +yystate5: + c = l.next() + switch { + default: + goto yyrule5 + case c == '\t' || c == ' ': + goto yystate6 + } + +yystate6: + c = l.next() + switch { + default: + goto yyrule4 + case c == '\t' || c == ' ': + goto yystate6 + } + +yystate7: + c = l.next() + switch { + default: + goto yyrule10 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate7 + } + + goto yystate8 // silence unused label error +yystate8: + c = l.next() +yystart8: + switch { + default: + goto yyabort + case c == 'H': + goto yystate9 + case c == 'T': + goto yystate14 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate9: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate10 + } + +yystate10: + c = l.next() + switch { + default: + goto yyabort + case c == 'L': + goto yystate11 + } + +yystate11: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate12 + } + +yystate12: + c = l.next() + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate13 + } + +yystate13: + c = l.next() + switch { + default: + goto yyrule6 + case c == '\t' || c == ' ': + goto yystate13 + } + +yystate14: + c = l.next() + switch { + default: + goto yyabort + case c == 'Y': + goto yystate15 + } + +yystate15: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate16 + } + +yystate16: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate17 + } + +yystate17: + c = l.next() + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate18 + } + +yystate18: + c = l.next() + switch { + default: + goto yyrule7 + case c == '\t' || c == ' ': + goto yystate18 + } + + goto yystate19 // silence unused label error +yystate19: + c = l.next() +yystart19: + switch { + default: + goto yyabort + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate20 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate20: + c = l.next() + switch { + default: + goto yyrule8 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate20 + } + + goto yystate21 // silence unused label error +yystate21: + c = l.next() +yystart21: + 
switch { + default: + goto yyrule9 + case c == '\t' || c == ' ': + goto yystate23 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate22 + } + +yystate22: + c = l.next() + switch { + default: + goto yyrule9 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate22 + } + +yystate23: + c = l.next() + switch { + default: + goto yyrule3 + case c == '\t' || c == ' ': + goto yystate23 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate22 + } + + goto yystate24 // silence unused label error +yystate24: + c = l.next() +yystart24: + switch { + default: + goto yyabort + case c == ',': + goto yystate25 + case c == '=': + goto yystate26 + case c == '\t' || c == ' ': + goto yystate3 + case c == '}': + goto yystate28 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate27 + } + +yystate25: + c = l.next() + goto yyrule15 + +yystate26: + c = l.next() + goto yyrule14 + +yystate27: + c = l.next() + switch { + default: + goto yyrule12 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate27 + } + +yystate28: + c = l.next() + goto yyrule13 + + goto yystate29 // silence unused label error +yystate29: + c = l.next() +yystart29: + switch { + default: + goto yyabort + case c == '"': + goto yystate30 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate30: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate31 + case c == '\\': + goto yystate32 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate30 + } + +yystate31: + c = l.next() + goto yyrule16 + +yystate32: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate30 + } + + goto yystate33 // silence unused label error +yystate33: + c = l.next() +yystart33: + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate3 + case c == '{': + goto yystate35 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ': + goto yystate34 + } + +yystate34: + c = l.next() + switch { + default: + goto yyrule17 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' 
&& c <= 'z' || c >= '|' && c <= 'ÿ': + goto yystate34 + } + +yystate35: + c = l.next() + goto yyrule11 + + goto yystate36 // silence unused label error +yystate36: + c = l.next() +yystart36: + switch { + default: + goto yyabort + case c == '\n': + goto yystate37 + case c == '\t' || c == ' ': + goto yystate3 + case c >= '0' && c <= '9': + goto yystate38 + } + +yystate37: + c = l.next() + goto yyrule19 + +yystate38: + c = l.next() + switch { + default: + goto yyrule18 + case c >= '0' && c <= '9': + goto yystate38 + } + +yyrule1: // \0 + { + return tEOF + } +yyrule2: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } +yyrule3: // [ \t]+ + { + return tWhitespace + } +yyrule4: // #[ \t]+ + { + l.state = sComment + goto yystate0 + } +yyrule5: // # + { + return l.consumeComment() + } +yyrule6: // HELP[\t ]+ + { + l.state = sMeta1 + return tHelp + goto yystate0 + } +yyrule7: // TYPE[\t ]+ + { + l.state = sMeta1 + return tType + goto yystate0 + } +yyrule8: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule9: // {C}* + { + l.state = sInit + return tText + goto yystate0 + } +yyrule10: // {M}({M}|{D})* + { + l.state = sValue + return tMName + goto yystate0 + } +yyrule11: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule12: // {L}({L}|{D})* + { + return tLName + } +yyrule13: // \} + { + l.state = sValue + return tBraceClose + goto yystate0 + } +yyrule14: // = + { + l.state = sLValue + return tEqual + goto yystate0 + } +yyrule15: // , + { + return tComma + } +yyrule16: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tLValue + goto yystate0 + } +yyrule17: // [^{ \t\n]+ + { + l.state = sTimestamp + return tValue + goto yystate0 + } +yyrule18: // {D}+ + { + return tTimestamp + } +yyrule19: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } + panic("unreachable") + + goto yyabort // silence unused label error + +yyabort: // no lexem recognized + // Workaround to gobble up comments that started with a HELP or TYPE + // prefix. We just consume all characters until we reach a newline. + // This saves us from adding disproportionate complexity to the parser. + if l.state == sComment { + return l.consumeComment() + } + return tInvalid +} + +func (l *promlexer) consumeComment() token { + for c := l.cur(); ; c = l.next() { + switch c { + case 0: + return tEOF + case '\n': + l.state = sInit + return tComment + } + } +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go new file mode 100644 index 00000000000..8dc23a6b01d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -0,0 +1,426 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
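The promparse.go file below completes the vendored `Parser` implementation. It is a pull parser: callers invoke `Next` until it returns `io.EOF` and use the returned `Entry` kind to decide which accessor is valid for the current line. A minimal sketch of that call pattern (illustration only, not part of this diff; the scrape text and printing are invented for the example, while `NewPromParser`, `Next`, the `Entry` kinds, and the accessors are those defined in the vendored package):

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	scrape := []byte("# HELP go_goroutines Number of goroutines that currently exist.\n" +
		"# TYPE go_goroutines gauge\n" +
		"go_goroutines 33\n")

	p := textparse.NewPromParser(scrape)
	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			break // end of the scrape buffer
		}
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		switch entry {
		case textparse.EntryHelp:
			name, help := p.Help()
			fmt.Printf("help %s: %s\n", name, help)
		case textparse.EntryType:
			name, typ := p.Type()
			fmt.Printf("type %s: %s\n", name, typ)
		case textparse.EntrySeries:
			var lset labels.Labels
			p.Metric(&lset) // copies the labels out of the scrape buffer
			_, ts, val := p.Series()
			if ts != nil {
				fmt.Println(lset, *ts, val)
			} else {
				fmt.Println(lset, val)
			}
		}
	}
}
```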
+
+//go:generate go get -u modernc.org/golex
+//go:generate golex -o=promlex.l.go promlex.l
+
+package textparse
+
+import (
+    "fmt"
+    "io"
+    "math"
+    "sort"
+    "strconv"
+    "strings"
+    "unicode/utf8"
+    "unsafe"
+
+    "github.com/pkg/errors"
+
+    "github.com/prometheus/prometheus/model/exemplar"
+    "github.com/prometheus/prometheus/model/labels"
+    "github.com/prometheus/prometheus/model/value"
+)
+
+type promlexer struct {
+    b     []byte
+    i     int
+    start int
+    err   error
+    state int
+}
+
+type token int
+
+const (
+    tInvalid   token = -1
+    tEOF       token = 0
+    tLinebreak token = iota
+    tWhitespace
+    tHelp
+    tType
+    tUnit
+    tEOFWord
+    tText
+    tComment
+    tBlank
+    tMName
+    tBraceOpen
+    tBraceClose
+    tLName
+    tLValue
+    tComma
+    tEqual
+    tTimestamp
+    tValue
+)
+
+func (t token) String() string {
+    switch t {
+    case tInvalid:
+        return "INVALID"
+    case tEOF:
+        return "EOF"
+    case tLinebreak:
+        return "LINEBREAK"
+    case tWhitespace:
+        return "WHITESPACE"
+    case tHelp:
+        return "HELP"
+    case tType:
+        return "TYPE"
+    case tUnit:
+        return "UNIT"
+    case tEOFWord:
+        return "EOFWORD"
+    case tText:
+        return "TEXT"
+    case tComment:
+        return "COMMENT"
+    case tBlank:
+        return "BLANK"
+    case tMName:
+        return "MNAME"
+    case tBraceOpen:
+        return "BOPEN"
+    case tBraceClose:
+        return "BCLOSE"
+    case tLName:
+        return "LNAME"
+    case tLValue:
+        return "LVALUE"
+    case tEqual:
+        return "EQUAL"
+    case tComma:
+        return "COMMA"
+    case tTimestamp:
+        return "TIMESTAMP"
+    case tValue:
+        return "VALUE"
+    }
+    return fmt.Sprintf("<invalid: %d>", t)
+}
+
+// buf returns the buffer of the current token.
+func (l *promlexer) buf() []byte {
+    return l.b[l.start:l.i]
+}
+
+func (l *promlexer) cur() byte {
+    return l.b[l.i]
+}
+
+// next advances the promlexer to the next character.
+func (l *promlexer) next() byte {
+    l.i++
+    if l.i >= len(l.b) {
+        l.err = io.EOF
+        return byte(tEOF)
+    }
+    // Lex struggles with null bytes. If we are in a label value or help string, where
+    // they are allowed, consume them here immediately.
+    for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
+        l.i++
+    }
+    return l.b[l.i]
+}
+
+func (l *promlexer) Error(es string) {
+    l.err = errors.New(es)
+}
+
+// PromParser parses samples from a byte slice of samples in the official
+// Prometheus text exposition format.
+type PromParser struct {
+    l       *promlexer
+    series  []byte
+    text    []byte
+    mtype   MetricType
+    val     float64
+    ts      int64
+    hasTS   bool
+    start   int
+    offsets []int
+}
+
+// NewPromParser returns a new parser of the byte slice.
+func NewPromParser(b []byte) Parser {
+    return &PromParser{l: &promlexer{b: append(b, '\n')}}
+}
+
+// Series returns the bytes of the series, the timestamp if set, and the value
+// of the current sample.
+func (p *PromParser) Series() ([]byte, *int64, float64) {
+    if p.hasTS {
+        return p.series, &p.ts, p.val
+    }
+    return p.series, nil, p.val
+}
+
+// Help returns the metric name and help text in the current entry.
+// Must only be called after Next returned a help entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Help() ([]byte, []byte) {
+    m := p.l.b[p.offsets[0]:p.offsets[1]]
+
+    // Replacer causes allocations. Replace only when necessary.
+    if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
+        return m, []byte(helpReplacer.Replace(string(p.text)))
+    }
+    return m, p.text
+}
+
+// Type returns the metric name and type in the current entry.
+// Must only be called after Next returned a type entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Type() ([]byte, MetricType) {
+    return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
+}
+
+// Unit returns the metric name and unit in the current entry.
+// Must only be called after Next returned a unit entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Unit() ([]byte, []byte) {
+    // The Prometheus format does not have units.
+    return nil, nil
+}
+
+// Comment returns the text of the current comment.
+// Must only be called after Next returned a comment entry.
+// The returned byte slice becomes invalid after the next call to Next.
+func (p *PromParser) Comment() []byte {
+    return p.text
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *PromParser) Metric(l *labels.Labels) string {
+    // Allocate the full immutable string immediately, so we just
+    // have to create references on it below.
+    s := string(p.series)
+
+    *l = append(*l, labels.Label{
+        Name:  labels.MetricName,
+        Value: s[:p.offsets[0]-p.start],
+    })
+
+    for i := 1; i < len(p.offsets); i += 4 {
+        a := p.offsets[i] - p.start
+        b := p.offsets[i+1] - p.start
+        c := p.offsets[i+2] - p.start
+        d := p.offsets[i+3] - p.start
+
+        // Replacer causes allocations. Replace only when necessary.
+        if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
+            *l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
+            continue
+        }
+        *l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
+    }
+
+    // Sort labels to maintain the sorted labels invariant.
+    sort.Sort(*l)
+
+    return s
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns whether an exemplar exists.
+func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
+    return false
+}
+
+// nextToken returns the next token from the promlexer. It skips over tabs
+// and spaces.
+func (p *PromParser) nextToken() token {
+    for {
+        if tok := p.l.Lex(); tok != tWhitespace {
+            return tok
+        }
+    }
+}
+
+func parseError(exp string, got token) error {
+    return errors.Errorf("%s, got %q", exp, got)
+}
+
+// Next advances the parser to the next sample. It returns io.EOF when no
+// more samples can be read, or another error if parsing failed.
+func (p *PromParser) Next() (Entry, error) {
+    var err error
+
+    p.start = p.l.i
+    p.offsets = p.offsets[:0]
+
+    switch t := p.nextToken(); t {
+    case tEOF:
+        return EntryInvalid, io.EOF
+    case tLinebreak:
+        // Allow full blank lines.
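+        // A leading linebreak means the line was blank, so recurse to restart
+        // parsing at the beginning of the next line.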
+ return p.Next() + + case tHelp, tType: + switch t := p.nextToken(); t { + case tMName: + p.offsets = append(p.offsets, p.l.start, p.l.i) + default: + return EntryInvalid, parseError("expected metric name after HELP", t) + } + switch t := p.nextToken(); t { + case tText: + if len(p.l.buf()) > 1 { + p.text = p.l.buf()[1:] + } else { + p.text = []byte{} + } + default: + return EntryInvalid, parseError("expected text in HELP", t) + } + switch t { + case tType: + switch s := yoloString(p.text); s { + case "counter": + p.mtype = MetricTypeCounter + case "gauge": + p.mtype = MetricTypeGauge + case "histogram": + p.mtype = MetricTypeHistogram + case "summary": + p.mtype = MetricTypeSummary + case "untyped": + p.mtype = MetricTypeUnknown + default: + return EntryInvalid, errors.Errorf("invalid metric type %q", s) + } + case tHelp: + if !utf8.Valid(p.text) { + return EntryInvalid, errors.Errorf("help text is not a valid utf8 string") + } + } + if t := p.nextToken(); t != tLinebreak { + return EntryInvalid, parseError("linebreak expected after metadata", t) + } + switch t { + case tHelp: + return EntryHelp, nil + case tType: + return EntryType, nil + } + case tComment: + p.text = p.l.buf() + if t := p.nextToken(); t != tLinebreak { + return EntryInvalid, parseError("linebreak expected after comment", t) + } + return EntryComment, nil + + case tMName: + p.offsets = append(p.offsets, p.l.i) + p.series = p.l.b[p.start:p.l.i] + + t2 := p.nextToken() + if t2 == tBraceOpen { + if err := p.parseLVals(); err != nil { + return EntryInvalid, err + } + p.series = p.l.b[p.start:p.l.i] + t2 = p.nextToken() + } + if t2 != tValue { + return EntryInvalid, parseError("expected value after metric", t) + } + if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { + return EntryInvalid, err + } + // Ensure canonical NaN value. + if math.IsNaN(p.val) { + p.val = math.Float64frombits(value.NormalNaN) + } + p.hasTS = false + switch p.nextToken() { + case tLinebreak: + break + case tTimestamp: + p.hasTS = true + if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { + return EntryInvalid, err + } + if t2 := p.nextToken(); t2 != tLinebreak { + return EntryInvalid, parseError("expected next entry after timestamp", t) + } + default: + return EntryInvalid, parseError("expected timestamp or new record", t) + } + return EntrySeries, nil + + default: + err = errors.Errorf("%q is not a valid start token", t) + } + return EntryInvalid, err +} + +func (p *PromParser) parseLVals() error { + t := p.nextToken() + for { + switch t { + case tBraceClose: + return nil + case tLName: + default: + return parseError("expected label name", t) + } + p.offsets = append(p.offsets, p.l.start, p.l.i) + + if t := p.nextToken(); t != tEqual { + return parseError("expected equal", t) + } + if t := p.nextToken(); t != tLValue { + return parseError("expected label value", t) + } + if !utf8.Valid(p.l.buf()) { + return errors.Errorf("invalid UTF-8 label value") + } + + // The promlexer ensures the value string is quoted. Strip first + // and last character. + p.offsets = append(p.offsets, p.l.start+1, p.l.i-1) + + // Free trailing commas are allowed. 
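+        // A comma may also immediately precede the closing brace, as in
+        // `metric{a="b",} 1`.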
+ if t = p.nextToken(); t == tComma { + t = p.nextToken() + } + } +} + +var lvalReplacer = strings.NewReplacer( + `\"`, "\"", + `\\`, "\\", + `\n`, "\n", +) + +var helpReplacer = strings.NewReplacer( + `\\`, "\\", + `\n`, "\n", +) + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func parseFloat(s string) (float64, error) { + // Keep to pre-Go 1.13 float formats. + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt new file mode 100644 index 00000000000..235f0aa464b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt @@ -0,0 +1,411 @@ +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +go_goroutines 33 +go_memstats_alloc_bytes 1.7518624e+07 +go_memstats_alloc_bytes_total 8.3062296e+08 +go_memstats_buck_hash_sys_bytes 1.494637e+06 +go_memstats_frees_total 4.65658e+06 +go_memstats_gc_sys_bytes 1.107968e+06 +go_memstats_heap_alloc_bytes 1.7518624e+07 +go_memstats_heap_idle_bytes 6.668288e+06 +go_memstats_heap_inuse_bytes 1.8956288e+07 +go_memstats_heap_objects 72755 +go_memstats_heap_released_bytes_total 0 +go_memstats_heap_sys_bytes 2.5624576e+07 +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +go_memstats_lookups_total 2089 +go_memstats_mallocs_total 4.729335e+06 +go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_sys_bytes 16384 +go_memstats_mspan_inuse_bytes 211520 +go_memstats_mspan_sys_bytes 245760 +go_memstats_next_gc_bytes 2.033527e+07 +go_memstats_other_sys_bytes 2.077323e+06 +go_memstats_stack_inuse_bytes 1.6384e+06 +go_memstats_stack_sys_bytes 1.6384e+06 +go_memstats_sys_bytes 3.2205048e+07 +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 
+http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 +http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN 
+http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN 
+http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 +http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN 
+http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN 
+http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 +http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +prometheus_config_last_reload_successful 1 +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 
+prometheus_evaluator_duration_seconds_count 1 +prometheus_evaluator_iterations_skipped_total 0 +prometheus_notifications_dropped_total 0 +prometheus_notifications_queue_capacity 10000 +prometheus_notifications_queue_length 0 +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +prometheus_sd_azure_refresh_failures_total 0 +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_failures_total 0 +prometheus_sd_dns_lookup_failures_total 0 +prometheus_sd_dns_lookups_total 0 +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +prometheus_sd_ec2_refresh_failures_total 0 +prometheus_sd_file_read_errors_total 0 +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +prometheus_sd_gce_refresh_failures_total 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 
+prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +prometheus_sd_marathon_refresh_failures_total 0 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +prometheus_target_skipped_scrapes_total 0 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +prometheus_treecache_watcher_goroutines 0 +prometheus_treecache_zookeeper_failures_total 0 +# EOF diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt new file mode 100644 index 00000000000..174f383e911 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt @@ -0,0 +1,529 @@ +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 33 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 1.7518624e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 8.3062296e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.494637e+06 +# HELP go_memstats_frees_total Total number of frees. 
+# TYPE go_memstats_frees_total counter +go_memstats_frees_total 4.65658e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 1.107968e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 1.7518624e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 6.668288e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.8956288e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 72755 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 0 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 2.5624576e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 2089 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 4.729335e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 211520 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 245760 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 2.033527e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 2.077323e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.6384e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.6384e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 3.2205048e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 
+http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. 
+# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 
+http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN +http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 
+http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. +# TYPE prometheus_build_info gauge +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. +# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. +# TYPE prometheus_config_last_reload_successful gauge +prometheus_config_last_reload_successful 1 +# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. 
+# TYPE prometheus_evaluator_duration_seconds summary +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_count 1 +# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. +# TYPE prometheus_evaluator_iterations_skipped_total counter +prometheus_evaluator_iterations_skipped_total 0 +# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. +# TYPE prometheus_notifications_dropped_total counter +prometheus_notifications_dropped_total 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 10000 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. +# TYPE prometheus_sd_azure_refresh_duration_seconds summary +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. +# TYPE prometheus_sd_azure_refresh_failures_total counter +prometheus_sd_azure_refresh_failures_total 0 +# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. +# TYPE prometheus_sd_consul_rpc_duration_seconds summary +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. 
+# TYPE prometheus_sd_consul_rpc_failures_total counter +prometheus_sd_consul_rpc_failures_total 0 +# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_sd_dns_lookup_failures_total counter +prometheus_sd_dns_lookup_failures_total 0 +# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_sd_dns_lookups_total counter +prometheus_sd_dns_lookups_total 0 +# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. +# TYPE prometheus_sd_ec2_refresh_duration_seconds summary +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. +# TYPE prometheus_sd_ec2_refresh_failures_total counter +prometheus_sd_ec2_refresh_failures_total 0 +# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. +# TYPE prometheus_sd_file_read_errors_total counter +prometheus_sd_file_read_errors_total 0 +# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. +# TYPE prometheus_sd_file_scan_duration_seconds summary +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. +# TYPE prometheus_sd_gce_refresh_duration summary +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. +# TYPE prometheus_sd_gce_refresh_failures_total counter +prometheus_sd_gce_refresh_failures_total 0 +# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. +# TYPE prometheus_sd_kubernetes_events_total counter +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. 
+# TYPE prometheus_sd_marathon_refresh_duration_seconds summary +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. +# TYPE prometheus_sd_marathon_refresh_failures_total counter +prometheus_sd_marathon_refresh_failures_total 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. +# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. +# TYPE prometheus_target_scrape_pool_sync_total counter +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. +# TYPE prometheus_target_skipped_scrapes_total counter +prometheus_target_skipped_scrapes_total 0 +# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. +# TYPE prometheus_target_sync_length_seconds summary +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. +# TYPE prometheus_treecache_watcher_goroutines gauge +prometheus_treecache_watcher_goroutines 0 +# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. +# TYPE prometheus_treecache_zookeeper_failures_total counter +prometheus_treecache_zookeeper_failures_total 0 +# EOF diff --git a/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go b/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go new file mode 100644 index 00000000000..93458f644d1 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/timestamp/timestamp.go @@ -0,0 +1,34 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timestamp + +import ( + "math" + "time" +) + +// FromTime returns a new millisecond timestamp from a time. +func FromTime(t time.Time) int64 { + return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond) +} + +// Time returns a new time.Time object from a millisecond timestamp. +func Time(ts int64) time.Time { + return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC() +} + +// FromFloatSeconds returns a millisecond timestamp from float seconds. +func FromFloatSeconds(ts float64) int64 { + return int64(math.Round(ts * 1000)) +} diff --git a/vendor/github.com/prometheus/prometheus/model/value/value.go b/vendor/github.com/prometheus/prometheus/model/value/value.go new file mode 100644 index 00000000000..655ce852d51 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/value/value.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package value + +import ( + "math" +) + +const ( + // NormalNaN is a quiet NaN. This is also math.NaN(). + NormalNaN uint64 = 0x7ff8000000000001 + + // StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0. + // This value is chosen with many leading 0s, so we have scope to store more + // complicated values in the future. It is 2 rather than 1 to make + // it easier to distinguish from the NormalNaN by a human when debugging. + StaleNaN uint64 = 0x7ff0000000000002 +) + +// IsStaleNaN returns true when the provided NaN value is a stale marker. +func IsStaleNaN(v float64) bool { + return math.Float64bits(v) == StaleNaN +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go new file mode 100644 index 00000000000..35d47a86b62 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -0,0 +1,329 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
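// Editor's sketch, not part of this diff: how the vendored model/timestamp
// and model/value helpers above fit together. fromTime mirrors
// timestamp.FromTime, and staleNaN is the same bit pattern as value.StaleNaN.
// Only bit-equality identifies a stale marker, because every NaN compares
// unequal to itself.
package main

import (
	"fmt"
	"math"
	"time"
)

const staleNaN uint64 = 0x7ff0000000000002

// fromTime converts a time.Time to a millisecond timestamp, as timestamp.FromTime does.
func fromTime(t time.Time) int64 {
	return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond)
}

func main() {
	now := time.Now()
	ms := fromTime(now)
	back := time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond)).UTC()
	fmt.Println(ms, back) // millisecond precision survives the round trip

	marker := math.Float64frombits(staleNaN)
	fmt.Println(math.IsNaN(marker))                       // true: the marker is a NaN
	fmt.Println(math.Float64bits(marker) == staleNaN)     // true: detected only by bit pattern
	fmt.Println(math.Float64bits(math.NaN()) == staleNaN) // false: an ordinary NaN does not match
}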
+ +package scrape + +import ( + "fmt" + "hash/fnv" + "reflect" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/osutil" +) + +var targetMetadataCache = newMetadataMetricsCollector() + +// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics. +type MetadataMetricsCollector struct { + CacheEntries *prometheus.Desc + CacheBytes *prometheus.Desc + + scrapeManager *Manager +} + +func newMetadataMetricsCollector() *MetadataMetricsCollector { + return &MetadataMetricsCollector{ + CacheEntries: prometheus.NewDesc( + "prometheus_target_metadata_cache_entries", + "Total number of metric metadata entries in the cache", + []string{"scrape_job"}, + nil, + ), + CacheBytes: prometheus.NewDesc( + "prometheus_target_metadata_cache_bytes", + "The number of bytes that are currently used for storing metric metadata in the cache", + []string{"scrape_job"}, + nil, + ), + } +} + +func (mc *MetadataMetricsCollector) registerManager(m *Manager) { + mc.scrapeManager = m +} + +// Describe sends the metrics descriptions to the channel. +func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- mc.CacheEntries + ch <- mc.CacheBytes +} + +// Collect creates and sends the metrics for the metadata cache. +func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) { + if mc.scrapeManager == nil { + return + } + + for tset, targets := range mc.scrapeManager.TargetsActive() { + var size, length int + for _, t := range targets { + size += t.MetadataSize() + length += t.MetadataLength() + } + + ch <- prometheus.MustNewConstMetric( + mc.CacheEntries, + prometheus.GaugeValue, + float64(length), + tset, + ) + + ch <- prometheus.MustNewConstMetric( + mc.CacheBytes, + prometheus.GaugeValue, + float64(size), + tset, + ) + } +} + +// NewManager is the Manager constructor +func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager { + if o == nil { + o = &Options{} + } + if logger == nil { + logger = log.NewNopLogger() + } + m := &Manager{ + append: app, + opts: o, + logger: logger, + scrapeConfigs: make(map[string]*config.ScrapeConfig), + scrapePools: make(map[string]*scrapePool), + graceShut: make(chan struct{}), + triggerReload: make(chan struct{}, 1), + } + targetMetadataCache.registerManager(m) + + return m +} + +// Options are the configuration parameters to the scrape manager. +type Options struct { + ExtraMetrics bool +} + +// Manager maintains a set of scrape pools and manages start/stop cycles +// when receiving new target groups from the discovery manager. +type Manager struct { + opts *Options + logger log.Logger + append storage.Appendable + graceShut chan struct{} + + jitterSeed uint64 // Global jitterSeed seed is used to spread scrape workload across HA setup. + mtxScrape sync.Mutex // Guards the fields below. + scrapeConfigs map[string]*config.ScrapeConfig + scrapePools map[string]*scrapePool + targetSets map[string][]*targetgroup.Group + + triggerReload chan struct{} +} + +// Run receives and saves target set updates and triggers the scraping loops reloading. +// Reloading happens in the background so that it doesn't block receiving targets updates. 
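// Editor's sketch, not part of this diff: the reload-coalescing idiom that
// Run and reloader (below) build on. triggerReload is a buffered channel of
// size one; a non-blocking send collapses any burst of target-set updates
// into at most one pending reload per ticker interval.
package main

import (
	"fmt"
	"time"
)

func main() {
	triggerReload := make(chan struct{}, 1)

	// Producer side: five updates arrive in quick succession.
	for i := 0; i < 5; i++ {
		select {
		case triggerReload <- struct{}{}: // first send fills the slot
		default: // slot already full: the update is coalesced, not queued
		}
	}

	// Consumer side: the ticker drains at most one trigger per tick.
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	<-ticker.C
	select {
	case <-triggerReload:
		fmt.Println("one reload for five updates")
	default:
		fmt.Println("nothing pending")
	}
}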
+func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error { + go m.reloader() + for { + select { + case ts := <-tsets: + m.updateTsets(ts) + + select { + case m.triggerReload <- struct{}{}: + default: + } + + case <-m.graceShut: + return nil + } + } +} + +func (m *Manager) reloader() { + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case <-m.graceShut: + return + case <-ticker.C: + select { + case <-m.triggerReload: + m.reload() + case <-m.graceShut: + return + } + } + } +} + +func (m *Manager) reload() { + m.mtxScrape.Lock() + var wg sync.WaitGroup + for setName, groups := range m.targetSets { + if _, ok := m.scrapePools[setName]; !ok { + scrapeConfig, ok := m.scrapeConfigs[setName] + if !ok { + level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) + continue + } + sp, err := newScrapePool(scrapeConfig, m.append, m.jitterSeed, log.With(m.logger, "scrape_pool", setName), m.opts.ExtraMetrics) + if err != nil { + level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) + continue + } + m.scrapePools[setName] = sp + } + + wg.Add(1) + // Run the sync in parallel as these take a while and at high load can't catch up. + go func(sp *scrapePool, groups []*targetgroup.Group) { + sp.Sync(groups) + wg.Done() + }(m.scrapePools[setName], groups) + + } + m.mtxScrape.Unlock() + wg.Wait() +} + +// setJitterSeed calculates a global jitterSeed per server relying on extra label set. +func (m *Manager) setJitterSeed(labels labels.Labels) error { + h := fnv.New64a() + hostname, err := osutil.GetFQDN() + if err != nil { + return err + } + if _, err := fmt.Fprintf(h, "%s%s", hostname, labels.String()); err != nil { + return err + } + m.jitterSeed = h.Sum64() + return nil +} + +// Stop cancels all running scrape pools and blocks until all have exited. +func (m *Manager) Stop() { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + for _, sp := range m.scrapePools { + sp.stop() + } + close(m.graceShut) +} + +func (m *Manager) updateTsets(tsets map[string][]*targetgroup.Group) { + m.mtxScrape.Lock() + m.targetSets = tsets + m.mtxScrape.Unlock() +} + +// ApplyConfig resets the manager's target providers and job configurations as defined by the new cfg. +func (m *Manager) ApplyConfig(cfg *config.Config) error { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + c := make(map[string]*config.ScrapeConfig) + for _, scfg := range cfg.ScrapeConfigs { + c[scfg.JobName] = scfg + } + m.scrapeConfigs = c + + if err := m.setJitterSeed(cfg.GlobalConfig.ExternalLabels); err != nil { + return err + } + + // Cleanup and reload pool if the configuration has changed. + var failed bool + for name, sp := range m.scrapePools { + if cfg, ok := m.scrapeConfigs[name]; !ok { + sp.stop() + delete(m.scrapePools, name) + } else if !reflect.DeepEqual(sp.config, cfg) { + err := sp.reload(cfg) + if err != nil { + level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) + failed = true + } + } + } + + if failed { + return errors.New("failed to apply the new configuration") + } + return nil +} + +// TargetsAll returns active and dropped targets grouped by job_name. +func (m *Manager) TargetsAll() map[string][]*Target { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + targets := make(map[string][]*Target, len(m.scrapePools)) + for tset, sp := range m.scrapePools { + targets[tset] = append(sp.ActiveTargets(), sp.DroppedTargets()...) 
+ } + return targets +} + +// TargetsActive returns the active targets currently being scraped. +func (m *Manager) TargetsActive() map[string][]*Target { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + var ( + wg sync.WaitGroup + mtx sync.Mutex + ) + + targets := make(map[string][]*Target, len(m.scrapePools)) + wg.Add(len(m.scrapePools)) + for tset, sp := range m.scrapePools { + // Running in parallel limits the blocking time of scrapePool to scrape + // interval when there's an update from SD. + go func(tset string, sp *scrapePool) { + mtx.Lock() + targets[tset] = sp.ActiveTargets() + mtx.Unlock() + wg.Done() + }(tset, sp) + } + wg.Wait() + return targets +} + +// TargetsDropped returns the dropped targets during relabelling. +func (m *Manager) TargetsDropped() map[string][]*Target { + m.mtxScrape.Lock() + defer m.mtxScrape.Unlock() + + targets := make(map[string][]*Target, len(m.scrapePools)) + for tset, sp := range m.scrapePools { + targets[tset] = sp.DroppedTargets() + } + return targets +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go new file mode 100644 index 00000000000..e6e55900b01 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -0,0 +1,1783 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scrape + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "sort" + "strconv" + "sync" + "time" + "unsafe" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/pool" +) + +// ScrapeTimestampTolerance is the tolerance for scrape appends timestamps +// alignment, to enable better compression at the TSDB level. +// See https://github.com/prometheus/prometheus/issues/7846 +var ScrapeTimestampTolerance = 2 * time.Millisecond + +// AlignScrapeTimestamps enables the tolerance for scrape appends timestamps described above. 
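// Editor's sketch, not part of this diff: the intended effect of the
// ScrapeTimestampTolerance / AlignScrapeTimestamps pair defined here. A scrape
// timestamp that lands within the tolerance of the ideal schedule is snapped
// to it, keeping sample deltas constant so they compress better in the TSDB.
// This is a simplified stand-in for the scrape-loop logic, not the vendored code.
package main

import (
	"fmt"
	"time"
)

func alignTimestamp(start, expected time.Time, tolerance time.Duration) time.Time {
	if d := start.Sub(expected); d >= -tolerance && d <= tolerance {
		return expected // close enough: snap to the schedule
	}
	return start // too far off (e.g. a GC pause): keep the real time
}

func main() {
	expected := time.Unix(100, 0)
	tol := 2 * time.Millisecond

	fmt.Println(alignTimestamp(expected.Add(1*time.Millisecond), expected, tol))  // snapped to expected
	fmt.Println(alignTimestamp(expected.Add(30*time.Millisecond), expected, tol)) // kept as-is
}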
+var AlignScrapeTimestamps = true + +var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName) + +var ( + targetIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_interval_length_seconds", + Help: "Actual intervals between scrapes.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"interval"}, + ) + targetReloadIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_reload_length_seconds", + Help: "Actual interval to reload the scrape pool with a given configuration.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"interval"}, + ) + targetScrapePools = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pools_total", + Help: "Total number of scrape pool creation attempts.", + }, + ) + targetScrapePoolsFailed = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pools_failed_total", + Help: "Total number of scrape pool creations that failed.", + }, + ) + targetScrapePoolReloads = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_reloads_total", + Help: "Total number of scrape pool reloads.", + }, + ) + targetScrapePoolReloadsFailed = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_reloads_failed_total", + Help: "Total number of failed scrape pool reloads.", + }, + ) + targetScrapePoolExceededTargetLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_exceeded_target_limit_total", + Help: "Total number of times scrape pools hit the target limit, during sync or config reload.", + }, + ) + targetScrapePoolTargetLimit = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_target_limit", + Help: "Maximum number of targets allowed in this scrape pool.", + }, + []string{"scrape_job"}, + ) + targetScrapePoolTargetsAdded = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_targets", + Help: "Current number of targets in this scrape pool.", + }, + []string{"scrape_job"}, + ) + targetSyncIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_sync_length_seconds", + Help: "Actual interval to sync the scrape pool.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"scrape_job"}, + ) + targetScrapePoolSyncsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_sync_total", + Help: "Total number of syncs that were executed on a scrape pool.", + }, + []string{"scrape_job"}, + ) + targetScrapeExceededBodySizeLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exceeded_body_size_limit_total", + Help: "Total number of scrapes that hit the body size limit", + }, + ) + targetScrapeSampleLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exceeded_sample_limit_total", + Help: "Total number of scrapes that hit the sample limit and were rejected.", + }, + ) + targetScrapeSampleDuplicate = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total", + Help: "Total number of samples rejected due to duplicate timestamps but different 
values.", + }, + ) + targetScrapeSampleOutOfOrder = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_sample_out_of_order_total", + Help: "Total number of samples rejected due to not being out of the expected order.", + }, + ) + targetScrapeSampleOutOfBounds = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_sample_out_of_bounds_total", + Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.", + }, + ) + targetScrapeCacheFlushForced = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_cache_flush_forced_total", + Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.", + }, + ) + targetScrapeExemplarOutOfOrder = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exemplar_out_of_order_total", + Help: "Total number of exemplar rejected due to not being out of the expected order.", + }, + ) + targetScrapePoolExceededLabelLimits = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_exceeded_label_limits_total", + Help: "Total number of times scrape pools hit the label limits, during sync or config reload.", + }, + ) + targetSyncFailed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_target_sync_failed_total", + Help: "Total number of target sync failures.", + }, + []string{"scrape_job"}, + ) +) + +func init() { + prometheus.MustRegister( + targetIntervalLength, + targetReloadIntervalLength, + targetScrapePools, + targetScrapePoolsFailed, + targetScrapePoolReloads, + targetScrapePoolReloadsFailed, + targetSyncIntervalLength, + targetScrapePoolSyncsCounter, + targetScrapeExceededBodySizeLimit, + targetScrapeSampleLimit, + targetScrapeSampleDuplicate, + targetScrapeSampleOutOfOrder, + targetScrapeSampleOutOfBounds, + targetScrapePoolExceededTargetLimit, + targetScrapePoolTargetLimit, + targetScrapePoolTargetsAdded, + targetScrapeCacheFlushForced, + targetMetadataCache, + targetScrapeExemplarOutOfOrder, + targetScrapePoolExceededLabelLimits, + targetSyncFailed, + ) +} + +// scrapePool manages scrapes for sets of targets. +type scrapePool struct { + appendable storage.Appendable + logger log.Logger + cancel context.CancelFunc + + // mtx must not be taken after targetMtx. + mtx sync.Mutex + config *config.ScrapeConfig + client *http.Client + loops map[uint64]loop + + targetMtx sync.Mutex + // activeTargets and loops must always be synchronized to have the same + // set of hashes. + activeTargets map[uint64]*Target + droppedTargets []*Target + + // Constructor for new scrape loops. This is settable for testing convenience. 
+ newLoop func(scrapeLoopOptions) loop +} + +type labelLimits struct { + labelLimit int + labelNameLengthLimit int + labelValueLengthLimit int +} + +type scrapeLoopOptions struct { + target *Target + scraper scraper + sampleLimit int + labelLimits *labelLimits + honorLabels bool + honorTimestamps bool + interval time.Duration + timeout time.Duration + mrc []*relabel.Config + cache *scrapeCache +} + +const maxAheadTime = 10 * time.Minute + +type labelsMutator func(labels.Labels) labels.Labels + +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, jitterSeed uint64, logger log.Logger, reportExtraMetrics bool) (*scrapePool, error) { + targetScrapePools.Inc() + if logger == nil { + logger = log.NewNopLogger() + } + + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName) + if err != nil { + targetScrapePoolsFailed.Inc() + return nil, errors.Wrap(err, "error creating HTTP client") + } + + buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + + ctx, cancel := context.WithCancel(context.Background()) + sp := &scrapePool{ + cancel: cancel, + appendable: app, + config: cfg, + client: client, + activeTargets: map[uint64]*Target{}, + loops: map[uint64]loop{}, + logger: logger, + } + sp.newLoop = func(opts scrapeLoopOptions) loop { + // Update the targets retrieval function for metadata to a new scrape cache. + cache := opts.cache + if cache == nil { + cache = newScrapeCache() + } + opts.target.SetMetadataStore(cache) + + return newScrapeLoop( + ctx, + opts.scraper, + log.With(logger, "target", opts.target), + buffers, + func(l labels.Labels) labels.Labels { + return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) + }, + func(l labels.Labels) labels.Labels { return mutateReportSampleLabels(l, opts.target) }, + func(ctx context.Context) storage.Appender { return appender(app.Appender(ctx), opts.sampleLimit) }, + cache, + jitterSeed, + opts.honorTimestamps, + opts.sampleLimit, + opts.labelLimits, + opts.interval, + opts.timeout, + reportExtraMetrics, + ) + } + + return sp, nil +} + +func (sp *scrapePool) ActiveTargets() []*Target { + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() + + var tActive []*Target + for _, t := range sp.activeTargets { + tActive = append(tActive, t) + } + return tActive +} + +func (sp *scrapePool) DroppedTargets() []*Target { + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() + return sp.droppedTargets +} + +// stop terminates all scrape loops and returns after they all terminated. +func (sp *scrapePool) stop() { + sp.mtx.Lock() + defer sp.mtx.Unlock() + sp.cancel() + var wg sync.WaitGroup + + sp.targetMtx.Lock() + + for fp, l := range sp.loops { + wg.Add(1) + + go func(l loop) { + l.stop() + wg.Done() + }(l) + + delete(sp.loops, fp) + delete(sp.activeTargets, fp) + } + + sp.targetMtx.Unlock() + + wg.Wait() + sp.client.CloseIdleConnections() + + if sp.config != nil { + targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName) + targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName) + targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName) + targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName) + targetSyncFailed.DeleteLabelValues(sp.config.JobName) + } +} + +// reload the scrape pool with the given scrape configuration. The target state is preserved +// but all scrape loops are restarted with the new scrape configuration. +// This method returns after all scrape loops that were stopped have stopped scraping. 
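// Editor's sketch, not part of this diff (fakeLoop is a hypothetical stand-in):
// the fan-out shutdown pattern used by stop above and by reload below. Every
// loop is stopped in its own goroutine and a WaitGroup blocks until all have
// exited, so total shutdown time is the slowest loop, not the sum of all loops.
package main

import (
	"fmt"
	"sync"
	"time"
)

type fakeLoop struct{ id int }

func (l *fakeLoop) stop() { time.Sleep(10 * time.Millisecond) }

func main() {
	loops := map[uint64]*fakeLoop{1: {id: 1}, 2: {id: 2}, 3: {id: 3}}

	var wg sync.WaitGroup
	start := time.Now()
	for fp, l := range loops {
		wg.Add(1)
		go func(l *fakeLoop) { // stop each loop concurrently
			l.stop()
			wg.Done()
		}(l)
		delete(loops, fp) // deleting during range is safe for Go maps
	}
	wg.Wait()
	fmt.Printf("stopped all loops in %v\n", time.Since(start).Round(time.Millisecond))
}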
+func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { + sp.mtx.Lock() + defer sp.mtx.Unlock() + targetScrapePoolReloads.Inc() + start := time.Now() + + client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName) + if err != nil { + targetScrapePoolReloadsFailed.Inc() + return errors.Wrap(err, "error creating HTTP client") + } + + reuseCache := reusableCache(sp.config, cfg) + sp.config = cfg + oldClient := sp.client + sp.client = client + + targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) + + var ( + wg sync.WaitGroup + interval = time.Duration(sp.config.ScrapeInterval) + timeout = time.Duration(sp.config.ScrapeTimeout) + bodySizeLimit = int64(sp.config.BodySizeLimit) + sampleLimit = int(sp.config.SampleLimit) + labelLimits = &labelLimits{ + labelLimit: int(sp.config.LabelLimit), + labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), + labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), + } + honorLabels = sp.config.HonorLabels + honorTimestamps = sp.config.HonorTimestamps + mrc = sp.config.MetricRelabelConfigs + ) + + sp.targetMtx.Lock() + + forcedErr := sp.refreshTargetLimitErr() + for fp, oldLoop := range sp.loops { + var cache *scrapeCache + if oc := oldLoop.getCache(); reuseCache && oc != nil { + oldLoop.disableEndOfRunStalenessMarkers() + cache = oc + } else { + cache = newScrapeCache() + } + + var ( + t = sp.activeTargets[fp] + s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit} + newLoop = sp.newLoop(scrapeLoopOptions{ + target: t, + scraper: s, + sampleLimit: sampleLimit, + labelLimits: labelLimits, + honorLabels: honorLabels, + honorTimestamps: honorTimestamps, + mrc: mrc, + cache: cache, + interval: interval, + timeout: timeout, + }) + ) + wg.Add(1) + + go func(oldLoop, newLoop loop) { + oldLoop.stop() + wg.Done() + + newLoop.setForcedError(forcedErr) + newLoop.run(nil) + }(oldLoop, newLoop) + + sp.loops[fp] = newLoop + } + + sp.targetMtx.Unlock() + + wg.Wait() + oldClient.CloseIdleConnections() + targetReloadIntervalLength.WithLabelValues(interval.String()).Observe( + time.Since(start).Seconds(), + ) + return nil +} + +// Sync converts target groups into actual scrape targets and synchronizes +// the currently running scraper with the resulting set and returns all scraped and dropped targets. +func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { + sp.mtx.Lock() + defer sp.mtx.Unlock() + start := time.Now() + + sp.targetMtx.Lock() + var all []*Target + sp.droppedTargets = []*Target{} + for _, tg := range tgs { + targets, failures := TargetsFromGroup(tg, sp.config) + for _, err := range failures { + level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) + } + targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) + for _, t := range targets { + if t.Labels().Len() > 0 { + all = append(all, t) + } else if t.DiscoveredLabels().Len() > 0 { + sp.droppedTargets = append(sp.droppedTargets, t) + } + } + } + sp.targetMtx.Unlock() + sp.sync(all) + + targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe( + time.Since(start).Seconds(), + ) + targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc() +} + +// sync takes a list of potentially duplicated targets, deduplicates them, starts +// scrape loops for new targets, and stops scrape loops for disappeared targets. +// It returns after all stopped scrape loops terminated. 
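// Editor's sketch, not part of this diff (target and its hash method are
// simplified stand-ins for the vendored Target.hash): the hash-keyed
// deduplication that sync (below) applies to the targets Sync collects. The
// first target with a given identity gets a scrape loop; later duplicates are
// ignored apart from refreshing their discovered labels.
package main

import (
	"fmt"
	"hash/fnv"
)

type target struct{ job, addr string }

func (t target) hash() uint64 {
	h := fnv.New64a()
	h.Write([]byte(t.job + "\xff" + t.addr)) // identity here: job name + address
	return h.Sum64()
}

func main() {
	incoming := []target{
		{"prometheus", "10.0.0.1:9090"},
		{"prometheus", "10.0.0.1:9090"}, // duplicate from a second SD group
		{"prometheus", "10.0.0.2:9090"},
	}

	active := map[uint64]target{}
	for _, t := range incoming {
		if _, ok := active[t.hash()]; !ok {
			active[t.hash()] = t // new hash: a scrape loop would start here
		}
	}
	fmt.Println(len(active), "unique targets") // 2
}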
+func (sp *scrapePool) sync(targets []*Target) { + var ( + uniqueLoops = make(map[uint64]loop) + interval = time.Duration(sp.config.ScrapeInterval) + timeout = time.Duration(sp.config.ScrapeTimeout) + bodySizeLimit = int64(sp.config.BodySizeLimit) + sampleLimit = int(sp.config.SampleLimit) + labelLimits = &labelLimits{ + labelLimit: int(sp.config.LabelLimit), + labelNameLengthLimit: int(sp.config.LabelNameLengthLimit), + labelValueLengthLimit: int(sp.config.LabelValueLengthLimit), + } + honorLabels = sp.config.HonorLabels + honorTimestamps = sp.config.HonorTimestamps + mrc = sp.config.MetricRelabelConfigs + ) + + sp.targetMtx.Lock() + for _, t := range targets { + hash := t.hash() + + if _, ok := sp.activeTargets[hash]; !ok { + // The scrape interval and timeout labels are set to the config's values initially, + // so whether changed via relabeling or not, they'll exist and hold the correct values + // for every target. + var err error + interval, timeout, err = t.intervalAndTimeout(interval, timeout) + + s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit} + l := sp.newLoop(scrapeLoopOptions{ + target: t, + scraper: s, + sampleLimit: sampleLimit, + labelLimits: labelLimits, + honorLabels: honorLabels, + honorTimestamps: honorTimestamps, + mrc: mrc, + interval: interval, + timeout: timeout, + }) + if err != nil { + l.setForcedError(err) + } + + sp.activeTargets[hash] = t + sp.loops[hash] = l + + uniqueLoops[hash] = l + } else { + // This might be a duplicated target. + if _, ok := uniqueLoops[hash]; !ok { + uniqueLoops[hash] = nil + } + // Need to keep the most updated labels information + // for displaying it in the Service Discovery web page. + sp.activeTargets[hash].SetDiscoveredLabels(t.DiscoveredLabels()) + } + } + + var wg sync.WaitGroup + + // Stop and remove old targets and scraper loops. + for hash := range sp.activeTargets { + if _, ok := uniqueLoops[hash]; !ok { + wg.Add(1) + go func(l loop) { + l.stop() + wg.Done() + }(sp.loops[hash]) + + delete(sp.loops, hash) + delete(sp.activeTargets, hash) + } + } + + sp.targetMtx.Unlock() + + targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops))) + forcedErr := sp.refreshTargetLimitErr() + for _, l := range sp.loops { + l.setForcedError(forcedErr) + } + for _, l := range uniqueLoops { + if l != nil { + go l.run(nil) + } + } + // Wait for all potentially stopped scrapers to terminate. + // This covers the case of flapping targets. If the server is under high load, a new scraper + // may be active and tries to insert. The old scraper that didn't terminate yet could still + // be inserting a previous sample set. + wg.Wait() +} + +// refreshTargetLimitErr returns an error that can be passed to the scrape loops +// if the number of targets exceeds the configured limit. 
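// Editor's sketch, not part of this diff: the check refreshTargetLimitErr
// (below) performs, with the scrapePool stripped away. A zero limit disables
// the check; when the limit is exceeded, the same error is handed to every
// loop as a "forced" scrape error rather than stopping the loops outright.
package main

import "fmt"

func targetLimitErr(active, limit int) error {
	if limit == 0 || active <= limit {
		return nil
	}
	return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", active, limit)
}

func main() {
	fmt.Println(targetLimitErr(11, 0))  // <nil>: zero means unlimited
	fmt.Println(targetLimitErr(10, 10)) // <nil>: exactly at the limit is allowed
	fmt.Println(targetLimitErr(11, 10)) // target_limit exceeded (number of targets: 11, limit: 10)
}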
+func (sp *scrapePool) refreshTargetLimitErr() error { + if sp.config == nil || sp.config.TargetLimit == 0 { + return nil + } + if l := len(sp.activeTargets); l > int(sp.config.TargetLimit) { + targetScrapePoolExceededTargetLimit.Inc() + return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit) + } + return nil +} + +func verifyLabelLimits(lset labels.Labels, limits *labelLimits) error { + if limits == nil { + return nil + } + + met := lset.Get(labels.MetricName) + if limits.labelLimit > 0 { + nbLabels := len(lset) + if nbLabels > int(limits.labelLimit) { + return fmt.Errorf("label_limit exceeded (metric: %.50s, number of label: %d, limit: %d)", met, nbLabels, limits.labelLimit) + } + } + + if limits.labelNameLengthLimit == 0 && limits.labelValueLengthLimit == 0 { + return nil + } + + for _, l := range lset { + if limits.labelNameLengthLimit > 0 { + nameLength := len(l.Name) + if nameLength > int(limits.labelNameLengthLimit) { + return fmt.Errorf("label_name_length_limit exceeded (metric: %.50s, label: %.50v, name length: %d, limit: %d)", met, l, nameLength, limits.labelNameLengthLimit) + } + } + + if limits.labelValueLengthLimit > 0 { + valueLength := len(l.Value) + if valueLength > int(limits.labelValueLengthLimit) { + return fmt.Errorf("label_value_length_limit exceeded (metric: %.50s, label: %.50v, value length: %d, limit: %d)", met, l, valueLength, limits.labelValueLengthLimit) + } + } + } + return nil +} + +func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { + lb := labels.NewBuilder(lset) + targetLabels := target.Labels() + + if honor { + for _, l := range targetLabels { + if !lset.Has(l.Name) { + lb.Set(l.Name, l.Value) + } + } + } else { + var conflictingExposedLabels labels.Labels + for _, l := range targetLabels { + existingValue := lset.Get(l.Name) + if existingValue != "" { + conflictingExposedLabels = append(conflictingExposedLabels, labels.Label{Name: l.Name, Value: existingValue}) + } + // It is now safe to set the target label. + lb.Set(l.Name, l.Value) + } + + if len(conflictingExposedLabels) > 0 { + resolveConflictingExposedLabels(lb, lset, targetLabels, conflictingExposedLabels) + } + } + + res := lb.Labels() + + if len(rc) > 0 { + res = relabel.Process(res, rc...) + } + + return res +} + +func resolveConflictingExposedLabels(lb *labels.Builder, exposedLabels, targetLabels, conflictingExposedLabels labels.Labels) { + sort.SliceStable(conflictingExposedLabels, func(i, j int) bool { + return len(conflictingExposedLabels[i].Name) < len(conflictingExposedLabels[j].Name) + }) + + for i, l := range conflictingExposedLabels { + newName := l.Name + for { + newName = model.ExportedLabelPrefix + newName + if !exposedLabels.Has(newName) && + !targetLabels.Has(newName) && + !conflictingExposedLabels[:i].Has(newName) { + conflictingExposedLabels[i].Name = newName + break + } + } + } + + for _, l := range conflictingExposedLabels { + lb.Set(l.Name, l.Value) + } +} + +func mutateReportSampleLabels(lset labels.Labels, target *Target) labels.Labels { + lb := labels.NewBuilder(lset) + + for _, l := range target.Labels() { + lb.Set(model.ExportedLabelPrefix+l.Name, lset.Get(l.Name)) + lb.Set(l.Name, l.Value) + } + + return lb.Labels() +} + +// appender returns an appender for ingested samples from the target. 
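// Editor's sketch, not part of this diff (plain maps instead of labels.Labels):
// the honor_labels=false conflict rule implemented by mutateSampleLabels and
// resolveConflictingExposedLabels above. An exposed label that clashes with a
// target label keeps its value under an "exported_" prefix, repeated as often
// as needed until the name is free, while the target label wins the original name.
package main

import "fmt"

func resolve(exposed, targetLabels map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range exposed {
		out[k] = v
	}
	for k, v := range targetLabels {
		if old, ok := out[k]; ok {
			name := "exported_" + k
			for { // keep prefixing until the name is unused
				if _, taken := out[name]; !taken {
					break
				}
				name = "exported_" + name
			}
			out[name] = old
		}
		out[k] = v // the target label always wins the original name
	}
	return out
}

func main() {
	fmt.Println(resolve(
		map[string]string{"job": "app", "instance": "a:1"},
		map[string]string{"job": "prometheus"},
	)) // map[exported_job:app instance:a:1 job:prometheus]
}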
+func appender(app storage.Appender, limit int) storage.Appender { + app = &timeLimitAppender{ + Appender: app, + maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), + } + + // The limit is applied after metrics are potentially dropped via relabeling. + if limit > 0 { + app = &limitAppender{ + Appender: app, + limit: limit, + } + } + return app +} + +// A scraper retrieves samples and accepts a status report at the end. +type scraper interface { + scrape(ctx context.Context, w io.Writer) (string, error) + Report(start time.Time, dur time.Duration, err error) + offset(interval time.Duration, jitterSeed uint64) time.Duration +} + +// targetScraper implements the scraper interface for a target. +type targetScraper struct { + *Target + + client *http.Client + req *http.Request + timeout time.Duration + + gzipr *gzip.Reader + buf *bufio.Reader + + bodySizeLimit int64 +} + +var errBodySizeLimit = errors.New("body size limit exceeded") + +const acceptHeader = `application/openmetrics-text; version=0.0.1,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` + +var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) + +func (s *targetScraper) scrape(ctx context.Context, w io.Writer) (string, error) { + if s.req == nil { + req, err := http.NewRequest("GET", s.URL().String(), nil) + if err != nil { + return "", err + } + req.Header.Add("Accept", acceptHeader) + req.Header.Add("Accept-Encoding", "gzip") + req.Header.Set("User-Agent", UserAgent) + req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64)) + + s.req = req + } + + resp, err := s.client.Do(s.req.WithContext(ctx)) + if err != nil { + return "", err + } + defer func() { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + }() + + if resp.StatusCode != http.StatusOK { + return "", errors.Errorf("server returned HTTP status %s", resp.Status) + } + + if s.bodySizeLimit <= 0 { + s.bodySizeLimit = math.MaxInt64 + } + if resp.Header.Get("Content-Encoding") != "gzip" { + n, err := io.Copy(w, io.LimitReader(resp.Body, s.bodySizeLimit)) + if err != nil { + return "", err + } + if n >= s.bodySizeLimit { + targetScrapeExceededBodySizeLimit.Inc() + return "", errBodySizeLimit + } + return resp.Header.Get("Content-Type"), nil + } + + if s.gzipr == nil { + s.buf = bufio.NewReader(resp.Body) + s.gzipr, err = gzip.NewReader(s.buf) + if err != nil { + return "", err + } + } else { + s.buf.Reset(resp.Body) + if err = s.gzipr.Reset(s.buf); err != nil { + return "", err + } + } + + n, err := io.Copy(w, io.LimitReader(s.gzipr, s.bodySizeLimit)) + s.gzipr.Close() + if err != nil { + return "", err + } + if n >= s.bodySizeLimit { + targetScrapeExceededBodySizeLimit.Inc() + return "", errBodySizeLimit + } + return resp.Header.Get("Content-Type"), nil +} + +// A loop can run and be stopped again. It must not be reused after it was stopped. 
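+// Editorial note: in this file a loop is created by scrapePool.sync via
+// newLoop, started with `go l.run(nil)`, and torn down with l.stop(); a
+// stopped loop is replaced by a fresh one rather than restarted.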
+type loop interface { + run(errc chan<- error) + setForcedError(err error) + stop() + getCache() *scrapeCache + disableEndOfRunStalenessMarkers() +} + +type cacheEntry struct { + ref storage.SeriesRef + lastIter uint64 + hash uint64 + lset labels.Labels +} + +type scrapeLoop struct { + scraper scraper + l log.Logger + cache *scrapeCache + lastScrapeSize int + buffers *pool.Pool + jitterSeed uint64 + honorTimestamps bool + forcedErr error + forcedErrMtx sync.Mutex + sampleLimit int + labelLimits *labelLimits + interval time.Duration + timeout time.Duration + + appender func(ctx context.Context) storage.Appender + sampleMutator labelsMutator + reportSampleMutator labelsMutator + + parentCtx context.Context + ctx context.Context + cancel func() + stopped chan struct{} + + disabledEndOfRunStalenessMarkers bool + + reportExtraMetrics bool +} + +// scrapeCache tracks mappings of exposed metric strings to label sets and +// storage references. Additionally, it tracks staleness of series between +// scrapes. +type scrapeCache struct { + iter uint64 // Current scrape iteration. + + // How many series and metadata entries there were at the last success. + successfulCount int + + // Parsed string to an entry with information about the actual label set + // and its storage reference. + series map[string]*cacheEntry + + // Cache of dropped metric strings and their iteration. The iteration must + // be a pointer so we can update it without setting a new entry with an unsafe + // string in addDropped(). + droppedSeries map[string]*uint64 + + // seriesCur and seriesPrev store the labels of series that were seen + // in the current and previous scrape. + // We hold two maps and swap them out to save allocations. + seriesCur map[uint64]labels.Labels + seriesPrev map[uint64]labels.Labels + + metaMtx sync.Mutex + metadata map[string]*metaEntry +} + +// metaEntry holds meta information about a metric. +type metaEntry struct { + lastIter uint64 // Last scrape iteration the entry was observed at. + typ textparse.MetricType + help string + unit string +} + +func (m *metaEntry) size() int { + // The attribute lastIter although part of the struct it is not metadata. + return len(m.help) + len(m.unit) + len(m.typ) +} + +func newScrapeCache() *scrapeCache { + return &scrapeCache{ + series: map[string]*cacheEntry{}, + droppedSeries: map[string]*uint64{}, + seriesCur: map[uint64]labels.Labels{}, + seriesPrev: map[uint64]labels.Labels{}, + metadata: map[string]*metaEntry{}, + } +} + +func (c *scrapeCache) iterDone(flushCache bool) { + c.metaMtx.Lock() + count := len(c.series) + len(c.droppedSeries) + len(c.metadata) + c.metaMtx.Unlock() + + if flushCache { + c.successfulCount = count + } else if count > c.successfulCount*2+1000 { + // If a target had varying labels in scrapes that ultimately failed, + // the caches would grow indefinitely. Force a flush when this happens. + // We use the heuristic that this is a doubling of the cache size + // since the last scrape, and allow an additional 1000 in case + // initial scrapes all fail. + flushCache = true + targetScrapeCacheFlushForced.Inc() + } + + if flushCache { + // All caches may grow over time through series churn + // or multiple string representations of the same metric. Clean up entries + // that haven't appeared in the last scrape. 
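+ // (Editorial note: the sweep below deletes series and dropped-series
+ // entries whose lastIter does not match the current iteration, while
+ // metadata is retained for 10 further scrapes after its metric is gone.)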
+ for s, e := range c.series { + if c.iter != e.lastIter { + delete(c.series, s) + } + } + for s, iter := range c.droppedSeries { + if c.iter != *iter { + delete(c.droppedSeries, s) + } + } + c.metaMtx.Lock() + for m, e := range c.metadata { + // Keep metadata around for 10 scrapes after its metric disappeared. + if c.iter-e.lastIter > 10 { + delete(c.metadata, m) + } + } + c.metaMtx.Unlock() + + c.iter++ + } + + // Swap current and previous series. + c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev + + // We have to delete every single key in the map. + for k := range c.seriesCur { + delete(c.seriesCur, k) + } +} + +func (c *scrapeCache) get(met string) (*cacheEntry, bool) { + e, ok := c.series[met] + if !ok { + return nil, false + } + e.lastIter = c.iter + return e, true +} + +func (c *scrapeCache) addRef(met string, ref storage.SeriesRef, lset labels.Labels, hash uint64) { + if ref == 0 { + return + } + c.series[met] = &cacheEntry{ref: ref, lastIter: c.iter, lset: lset, hash: hash} +} + +func (c *scrapeCache) addDropped(met string) { + iter := c.iter + c.droppedSeries[met] = &iter +} + +func (c *scrapeCache) getDropped(met string) bool { + iterp, ok := c.droppedSeries[met] + if ok { + *iterp = c.iter + } + return ok +} + +func (c *scrapeCache) trackStaleness(hash uint64, lset labels.Labels) { + c.seriesCur[hash] = lset +} + +func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) { + for h, lset := range c.seriesPrev { + if _, ok := c.seriesCur[h]; !ok { + if !f(lset) { + break + } + } + } +} + +func (c *scrapeCache) setType(metric []byte, t textparse.MetricType) { + c.metaMtx.Lock() + + e, ok := c.metadata[yoloString(metric)] + if !ok { + e = &metaEntry{typ: textparse.MetricTypeUnknown} + c.metadata[string(metric)] = e + } + e.typ = t + e.lastIter = c.iter + + c.metaMtx.Unlock() +} + +func (c *scrapeCache) setHelp(metric, help []byte) { + c.metaMtx.Lock() + + e, ok := c.metadata[yoloString(metric)] + if !ok { + e = &metaEntry{typ: textparse.MetricTypeUnknown} + c.metadata[string(metric)] = e + } + if e.help != yoloString(help) { + e.help = string(help) + } + e.lastIter = c.iter + + c.metaMtx.Unlock() +} + +func (c *scrapeCache) setUnit(metric, unit []byte) { + c.metaMtx.Lock() + + e, ok := c.metadata[yoloString(metric)] + if !ok { + e = &metaEntry{typ: textparse.MetricTypeUnknown} + c.metadata[string(metric)] = e + } + if e.unit != yoloString(unit) { + e.unit = string(unit) + } + e.lastIter = c.iter + + c.metaMtx.Unlock() +} + +func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) { + c.metaMtx.Lock() + defer c.metaMtx.Unlock() + + m, ok := c.metadata[metric] + if !ok { + return MetricMetadata{}, false + } + return MetricMetadata{ + Metric: metric, + Type: m.typ, + Help: m.help, + Unit: m.unit, + }, true +} + +func (c *scrapeCache) ListMetadata() []MetricMetadata { + c.metaMtx.Lock() + defer c.metaMtx.Unlock() + + res := make([]MetricMetadata, 0, len(c.metadata)) + + for m, e := range c.metadata { + res = append(res, MetricMetadata{ + Metric: m, + Type: e.typ, + Help: e.help, + Unit: e.unit, + }) + } + return res +} + +// MetadataSize returns the size of the metadata cache. +func (c *scrapeCache) SizeMetadata() (s int) { + c.metaMtx.Lock() + defer c.metaMtx.Unlock() + for _, e := range c.metadata { + s += e.size() + } + + return s +} + +// MetadataLen returns the number of metadata entries in the cache. 
+func (c *scrapeCache) LengthMetadata() int { + c.metaMtx.Lock() + defer c.metaMtx.Unlock() + + return len(c.metadata) +} + +func newScrapeLoop(ctx context.Context, + sc scraper, + l log.Logger, + buffers *pool.Pool, + sampleMutator labelsMutator, + reportSampleMutator labelsMutator, + appender func(ctx context.Context) storage.Appender, + cache *scrapeCache, + jitterSeed uint64, + honorTimestamps bool, + sampleLimit int, + labelLimits *labelLimits, + interval time.Duration, + timeout time.Duration, + reportExtraMetrics bool, +) *scrapeLoop { + if l == nil { + l = log.NewNopLogger() + } + if buffers == nil { + buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) + } + if cache == nil { + cache = newScrapeCache() + } + sl := &scrapeLoop{ + scraper: sc, + buffers: buffers, + cache: cache, + appender: appender, + sampleMutator: sampleMutator, + reportSampleMutator: reportSampleMutator, + stopped: make(chan struct{}), + jitterSeed: jitterSeed, + l: l, + parentCtx: ctx, + honorTimestamps: honorTimestamps, + sampleLimit: sampleLimit, + labelLimits: labelLimits, + interval: interval, + timeout: timeout, + reportExtraMetrics: reportExtraMetrics, + } + sl.ctx, sl.cancel = context.WithCancel(ctx) + + return sl +} + +func (sl *scrapeLoop) run(errc chan<- error) { + select { + case <-time.After(sl.scraper.offset(sl.interval, sl.jitterSeed)): + // Continue after a scraping offset. + case <-sl.ctx.Done(): + close(sl.stopped) + return + } + + var last time.Time + + alignedScrapeTime := time.Now().Round(0) + ticker := time.NewTicker(sl.interval) + defer ticker.Stop() + +mainLoop: + for { + select { + case <-sl.parentCtx.Done(): + close(sl.stopped) + return + case <-sl.ctx.Done(): + break mainLoop + default: + } + + // Temporary workaround for a jitter in go timers that causes disk space + // increase in TSDB. + // See https://github.com/prometheus/prometheus/issues/7846 + // Calling Round ensures the time used is the wall clock, as otherwise .Sub + // and .Add on time.Time behave differently (see time package docs). + scrapeTime := time.Now().Round(0) + if AlignScrapeTimestamps && sl.interval > 100*ScrapeTimestampTolerance { + // For some reason, a tick might have been skipped, in which case we + // would call alignedScrapeTime.Add(interval) multiple times. + for scrapeTime.Sub(alignedScrapeTime) >= sl.interval { + alignedScrapeTime = alignedScrapeTime.Add(sl.interval) + } + // Align the scrape time if we are in the tolerance boundaries. + if scrapeTime.Sub(alignedScrapeTime) <= ScrapeTimestampTolerance { + scrapeTime = alignedScrapeTime + } + } + + last = sl.scrapeAndReport(last, scrapeTime, errc) + + select { + case <-sl.parentCtx.Done(): + close(sl.stopped) + return + case <-sl.ctx.Done(): + break mainLoop + case <-ticker.C: + } + } + + close(sl.stopped) + + if !sl.disabledEndOfRunStalenessMarkers { + sl.endOfRunStaleness(last, ticker, sl.interval) + } +} + +// scrapeAndReport performs a scrape and then appends the result to the storage +// together with reporting metrics, by using as few appenders as possible. +// In the happy scenario, a single appender is used. +// This function uses sl.parentCtx instead of sl.ctx on purpose. A scrape should +// only be cancelled on shutdown, not on reloads. +func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- error) time.Time { + start := time.Now() + + // Only record after the first scrape. 
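+ // (Editorial note: targetIntervalLength records the observed gap between
+ // consecutive scrapes per configured interval; the first scrape has no
+ // previous timestamp to compare against.)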
+ if !last.IsZero() { + targetIntervalLength.WithLabelValues(sl.interval.String()).Observe( + time.Since(last).Seconds(), + ) + } + + b := sl.buffers.Get(sl.lastScrapeSize).([]byte) + defer sl.buffers.Put(b) + buf := bytes.NewBuffer(b) + + var total, added, seriesAdded, bytes int + var err, appErr, scrapeErr error + + app := sl.appender(sl.parentCtx) + defer func() { + if err != nil { + app.Rollback() + return + } + err = app.Commit() + if err != nil { + level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + } + }() + + defer func() { + if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytes, scrapeErr); err != nil { + level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + } + }() + + if forcedErr := sl.getForcedError(); forcedErr != nil { + scrapeErr = forcedErr + // Add stale markers. + if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { + app.Rollback() + app = sl.appender(sl.parentCtx) + level.Warn(sl.l).Log("msg", "Append failed", "err", err) + } + if errc != nil { + errc <- forcedErr + } + + return start + } + + var contentType string + scrapeCtx, cancel := context.WithTimeout(sl.parentCtx, sl.timeout) + contentType, scrapeErr = sl.scraper.scrape(scrapeCtx, buf) + cancel() + + if scrapeErr == nil { + b = buf.Bytes() + // NOTE: There were issues with misbehaving clients in the past + // that occasionally returned empty results. We don't want those + // to falsely reset our buffer size. + if len(b) > 0 { + sl.lastScrapeSize = len(b) + } + bytes = len(b) + } else { + level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + if errc != nil { + errc <- scrapeErr + } + if errors.Is(scrapeErr, errBodySizeLimit) { + bytes = -1 + } + } + + // A failed scrape is the same as an empty scrape, + // we still call sl.append to trigger stale markers. + total, added, seriesAdded, appErr = sl.append(app, b, contentType, appendTime) + if appErr != nil { + app.Rollback() + app = sl.appender(sl.parentCtx) + level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) + // The append failed, probably due to a parse error or sample limit. + // Call sl.append again with an empty scrape to trigger stale markers. + if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { + app.Rollback() + app = sl.appender(sl.parentCtx) + level.Warn(sl.l).Log("msg", "Append failed", "err", err) + } + } + + if scrapeErr == nil { + scrapeErr = appErr + } + + return start +} + +func (sl *scrapeLoop) setForcedError(err error) { + sl.forcedErrMtx.Lock() + defer sl.forcedErrMtx.Unlock() + sl.forcedErr = err +} + +func (sl *scrapeLoop) getForcedError() error { + sl.forcedErrMtx.Lock() + defer sl.forcedErrMtx.Unlock() + return sl.forcedErr +} + +func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, interval time.Duration) { + // Scraping has stopped. We want to write stale markers but + // the target may be recreated, so we wait just over 2 scrape intervals + // before creating them. + // If the context is canceled, we presume the server is shutting down + // and will restart where is was. We do not attempt to write stale markers + // in this case. + + if last.IsZero() { + // There never was a scrape, so there will be no stale markers. + return + } + + // Wait for when the next scrape would have been, record its timestamp. 
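+ // (Editorial note: in total this path waits two full intervals plus an
+ // extra 10% before appending stale markers, matching the "just over 2
+ // scrape intervals" described above.)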
+ var staleTime time.Time + select { + case <-sl.parentCtx.Done(): + return + case <-ticker.C: + staleTime = time.Now() + } + + // Wait for when the next scrape would have been, if the target was recreated + // samples should have been ingested by now. + select { + case <-sl.parentCtx.Done(): + return + case <-ticker.C: + } + + // Wait for an extra 10% of the interval, just to be safe. + select { + case <-sl.parentCtx.Done(): + return + case <-time.After(interval / 10): + } + + // Call sl.append again with an empty scrape to trigger stale markers. + // If the target has since been recreated and scraped, the + // stale markers will be out of order and ignored. + app := sl.appender(sl.ctx) + var err error + defer func() { + if err != nil { + app.Rollback() + return + } + err = app.Commit() + if err != nil { + level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err) + } + }() + if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { + app.Rollback() + app = sl.appender(sl.ctx) + level.Warn(sl.l).Log("msg", "Stale append failed", "err", err) + } + if err = sl.reportStale(app, staleTime); err != nil { + level.Warn(sl.l).Log("msg", "Stale report failed", "err", err) + } +} + +// Stop the scraping. May still write data and stale markers after it has +// returned. Cancel the context to stop all writes. +func (sl *scrapeLoop) stop() { + sl.cancel() + <-sl.stopped +} + +func (sl *scrapeLoop) disableEndOfRunStalenessMarkers() { + sl.disabledEndOfRunStalenessMarkers = true +} + +func (sl *scrapeLoop) getCache() *scrapeCache { + return sl.cache +} + +type appendErrors struct { + numOutOfOrder int + numDuplicates int + numOutOfBounds int + numExemplarOutOfOrder int +} + +func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { + var ( + p = textparse.New(b, contentType) + defTime = timestamp.FromTime(ts) + appErrs = appendErrors{} + sampleLimitErr error + e exemplar.Exemplar // escapes to heap so hoisted out of loop + ) + + defer func() { + if err != nil { + return + } + // Only perform cache cleaning if the scrape was not empty. + // An empty scrape (usually) is used to indicate a failed scrape. + sl.cache.iterDone(len(b) > 0) + }() + +loop: + for { + var ( + et textparse.Entry + sampleAdded bool + ) + if et, err = p.Next(); err != nil { + if err == io.EOF { + err = nil + } + break + } + switch et { + case textparse.EntryType: + sl.cache.setType(p.Type()) + continue + case textparse.EntryHelp: + sl.cache.setHelp(p.Help()) + continue + case textparse.EntryUnit: + sl.cache.setUnit(p.Unit()) + continue + case textparse.EntryComment: + continue + default: + } + total++ + + t := defTime + met, tp, v := p.Series() + if !sl.honorTimestamps { + tp = nil + } + if tp != nil { + t = *tp + } + + if sl.cache.getDropped(yoloString(met)) { + continue + } + ce, ok := sl.cache.get(yoloString(met)) + var ( + ref storage.SeriesRef + lset labels.Labels + mets string + hash uint64 + ) + + if ok { + ref = ce.ref + lset = ce.lset + } else { + mets = p.Metric(&lset) + hash = lset.Hash() + + // Hash label set as it is seen local to the target. Then add target labels + // and relabeling and store the final label set. + lset = sl.sampleMutator(lset) + + // The label set may be set to nil to indicate dropping. + if lset == nil { + sl.cache.addDropped(mets) + continue + } + + if !lset.Has(labels.MetricName) { + err = errNameLabelMandatory + break loop + } + + // If any label limits is exceeded the scrape should fail. 
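+ // (Editorial note: verifyLabelLimits checks the label count as well as
+ // name and value lengths against the configured limits; any violation
+ // aborts the whole scrape rather than dropping a single series.)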
+ if err = verifyLabelLimits(lset, sl.labelLimits); err != nil { + targetScrapePoolExceededLabelLimits.Inc() + break loop + } + } + + ref, err = app.Append(ref, lset, t, v) + sampleAdded, err = sl.checkAddError(ce, met, tp, err, &sampleLimitErr, &appErrs) + if err != nil { + if err != storage.ErrNotFound { + level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) + } + break loop + } + + if !ok { + if tp == nil { + // Bypass staleness logic if there is an explicit timestamp. + sl.cache.trackStaleness(hash, lset) + } + sl.cache.addRef(mets, ref, lset, hash) + if sampleAdded && sampleLimitErr == nil { + seriesAdded++ + } + } + + // Increment added even if there's an error so we correctly report the + // number of samples remaining after relabeling. + added++ + + if hasExemplar := p.Exemplar(&e); hasExemplar { + if !e.HasTs { + e.Ts = t + } + _, exemplarErr := app.AppendExemplar(ref, lset, e) + exemplarErr = sl.checkAddExemplarError(exemplarErr, e, &appErrs) + if exemplarErr != nil { + // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. + level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) + } + e = exemplar.Exemplar{} // reset for next time round loop + } + + } + if sampleLimitErr != nil { + if err == nil { + err = sampleLimitErr + } + // We only want to increment this once per scrape, so this is Inc'd outside the loop. + targetScrapeSampleLimit.Inc() + } + if appErrs.numOutOfOrder > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) + } + if appErrs.numDuplicates > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) + } + if appErrs.numOutOfBounds > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) + } + if appErrs.numExemplarOutOfOrder > 0 { + level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) + } + if err == nil { + sl.cache.forEachStale(func(lset labels.Labels) bool { + // Series no longer exposed, mark it stale. + _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) + switch errors.Cause(err) { + case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + // Do not count these in logging, as this is expected if a target + // goes away and comes back again with a new scrape loop. + err = nil + } + return err == nil + }) + } + return +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +// Adds samples to the appender, checking the error, and then returns the # of samples added, +// whether the caller should continue to process more samples, and any sample limit errors. 
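+// Editorial note: out-of-order, duplicate, out-of-bounds and sample-limit
+// errors are counted and swallowed (returning (false, nil)) so a single bad
+// sample does not fail the scrape; any other error aborts the append loop.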
+func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr *error, appErrs *appendErrors) (bool, error) { + switch errors.Cause(err) { + case nil: + if tp == nil && ce != nil { + sl.cache.trackStaleness(ce.hash, ce.lset) + } + return true, nil + case storage.ErrNotFound: + return false, storage.ErrNotFound + case storage.ErrOutOfOrderSample: + appErrs.numOutOfOrder++ + level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) + targetScrapeSampleOutOfOrder.Inc() + return false, nil + case storage.ErrDuplicateSampleForTimestamp: + appErrs.numDuplicates++ + level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) + targetScrapeSampleDuplicate.Inc() + return false, nil + case storage.ErrOutOfBounds: + appErrs.numOutOfBounds++ + level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) + targetScrapeSampleOutOfBounds.Inc() + return false, nil + case errSampleLimit: + // Keep on parsing output if we hit the limit, so we report the correct + // total number of samples scraped. + *sampleLimitErr = err + return false, nil + default: + return false, err + } +} + +func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appErrs *appendErrors) error { + switch errors.Cause(err) { + case storage.ErrNotFound: + return storage.ErrNotFound + case storage.ErrOutOfOrderExemplar: + appErrs.numExemplarOutOfOrder++ + level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) + targetScrapeExemplarOutOfOrder.Inc() + return nil + default: + return err + } +} + +// The constants are suffixed with the invalid \xff unicode rune to avoid collisions +// with scraped metrics in the cache. +const ( + scrapeHealthMetricName = "up" + "\xff" + scrapeDurationMetricName = "scrape_duration_seconds" + "\xff" + scrapeSamplesMetricName = "scrape_samples_scraped" + "\xff" + samplesPostRelabelMetricName = "scrape_samples_post_metric_relabeling" + "\xff" + scrapeSeriesAddedMetricName = "scrape_series_added" + "\xff" + scrapeTimeoutMetricName = "scrape_timeout_seconds" + "\xff" + scrapeSampleLimitMetricName = "scrape_sample_limit" + "\xff" + scrapeBodySizeBytesMetricName = "scrape_body_size_bytes" + "\xff" +) + +func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) { + sl.scraper.Report(start, duration, scrapeErr) + + ts := timestamp.FromTime(start) + + var health float64 + if scrapeErr == nil { + health = 1 + } + + if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health); err != nil { + return + } + if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds()); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped)); err != nil { + return + } + if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added)); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded)); err != nil { + return + } + if sl.reportExtraMetrics { + if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds()); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit)); err != nil { + return + } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes)); err != nil { + return + } + } + return +} + +func (sl *scrapeLoop) 
reportStale(app storage.Appender, start time.Time) (err error) { + ts := timestamp.FromTime(start) + + stale := math.Float64frombits(value.StaleNaN) + + if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale); err != nil { + return + } + if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale); err != nil { + return + } + if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale); err != nil { + return + } + if sl.reportExtraMetrics { + if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale); err != nil { + return + } + if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale); err != nil { + return + } + if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale); err != nil { + return + } + } + return +} + +func (sl *scrapeLoop) addReportSample(app storage.Appender, s string, t int64, v float64) error { + ce, ok := sl.cache.get(s) + var ref storage.SeriesRef + var lset labels.Labels + if ok { + ref = ce.ref + lset = ce.lset + } else { + lset = labels.Labels{ + // The constants are suffixed with the invalid \xff unicode rune to avoid collisions + // with scraped metrics in the cache. + // We have to drop it when building the actual metric. + labels.Label{Name: labels.MetricName, Value: s[:len(s)-1]}, + } + lset = sl.reportSampleMutator(lset) + } + + ref, err := app.Append(ref, lset, t, v) + switch errors.Cause(err) { + case nil: + if !ok { + sl.cache.addRef(s, ref, lset, lset.Hash()) + } + return nil + case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + // Do not log here, as this is expected if a target goes away and comes back + // again with a new scrape loop. + return nil + default: + return err + } +} + +// zeroConfig returns a new scrape config that only contains configuration items +// that alter metrics. +func zeroConfig(c *config.ScrapeConfig) *config.ScrapeConfig { + z := *c + // We zero out the fields that for sure don't affect scrape. + z.ScrapeInterval = 0 + z.ScrapeTimeout = 0 + z.SampleLimit = 0 + z.LabelLimit = 0 + z.LabelNameLengthLimit = 0 + z.LabelValueLengthLimit = 0 + z.HTTPClientConfig = config_util.HTTPClientConfig{} + return &z +} + +// reusableCache compares two scrape config and tells whether the cache is still +// valid. +func reusableCache(r, l *config.ScrapeConfig) bool { + if r == nil || l == nil { + return false + } + return reflect.DeepEqual(zeroConfig(r), zeroConfig(l)) +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go new file mode 100644 index 00000000000..59d6c9403c4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -0,0 +1,500 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scrape + +import ( + "fmt" + "hash/fnv" + "net" + "net/url" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/model/value" + "github.com/prometheus/prometheus/storage" +) + +// TargetHealth describes the health state of a target. +type TargetHealth string + +// The possible health states of a target based on the last performed scrape. +const ( + HealthUnknown TargetHealth = "unknown" + HealthGood TargetHealth = "up" + HealthBad TargetHealth = "down" +) + +// Target refers to a singular HTTP or HTTPS endpoint. +type Target struct { + // Labels before any processing. + discoveredLabels labels.Labels + // Any labels that are added to this target and its metrics. + labels labels.Labels + // Additional URL parameters that are part of the target URL. + params url.Values + + mtx sync.RWMutex + lastError error + lastScrape time.Time + lastScrapeDuration time.Duration + health TargetHealth + metadata MetricMetadataStore +} + +// NewTarget creates a reasonably configured target for querying. +func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target { + return &Target{ + labels: labels, + discoveredLabels: discoveredLabels, + params: params, + health: HealthUnknown, + } +} + +func (t *Target) String() string { + return t.URL().String() +} + +// MetricMetadataStore represents a storage for metadata. +type MetricMetadataStore interface { + ListMetadata() []MetricMetadata + GetMetadata(metric string) (MetricMetadata, bool) + SizeMetadata() int + LengthMetadata() int +} + +// MetricMetadata is a piece of metadata for a metric. +type MetricMetadata struct { + Metric string + Type textparse.MetricType + Help string + Unit string +} + +func (t *Target) MetadataList() []MetricMetadata { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return nil + } + return t.metadata.ListMetadata() +} + +func (t *Target) MetadataSize() int { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return 0 + } + + return t.metadata.SizeMetadata() +} + +func (t *Target) MetadataLength() int { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return 0 + } + + return t.metadata.LengthMetadata() +} + +// Metadata returns type and help metadata for the given metric. +func (t *Target) Metadata(metric string) (MetricMetadata, bool) { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if t.metadata == nil { + return MetricMetadata{}, false + } + return t.metadata.GetMetadata(metric) +} + +func (t *Target) SetMetadataStore(s MetricMetadataStore) { + t.mtx.Lock() + defer t.mtx.Unlock() + t.metadata = s +} + +// hash returns an identifying hash for the target. +func (t *Target) hash() uint64 { + h := fnv.New64a() + + //nolint: errcheck + h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) + //nolint: errcheck + h.Write([]byte(t.URL().String())) + + return h.Sum64() +} + +// offset returns the time until the next scrape cycle for the target. +// It includes the global server jitterSeed for scrapes from multiple Prometheus to try to be at different times. 
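+// Editorial worked example: with interval = 1m, now 20s past a minute
+// boundary, and (t.hash() ^ jitterSeed) % interval = 50s, base is 40s and
+// next is 90s, which wraps to 30s. The offset is stable per target, so each
+// target keeps a fixed position within the interval.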
+func (t *Target) offset(interval time.Duration, jitterSeed uint64) time.Duration { + now := time.Now().UnixNano() + + // Base is a pinned to absolute time, no matter how often offset is called. + var ( + base = int64(interval) - now%int64(interval) + offset = (t.hash() ^ jitterSeed) % uint64(interval) + next = base + int64(offset) + ) + + if next > int64(interval) { + next -= int64(interval) + } + return time.Duration(next) +} + +// Labels returns a copy of the set of all public labels of the target. +func (t *Target) Labels() labels.Labels { + lset := make(labels.Labels, 0, len(t.labels)) + for _, l := range t.labels { + if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { + lset = append(lset, l) + } + } + return lset +} + +// DiscoveredLabels returns a copy of the target's labels before any processing. +func (t *Target) DiscoveredLabels() labels.Labels { + t.mtx.Lock() + defer t.mtx.Unlock() + lset := make(labels.Labels, len(t.discoveredLabels)) + copy(lset, t.discoveredLabels) + return lset +} + +// SetDiscoveredLabels sets new DiscoveredLabels +func (t *Target) SetDiscoveredLabels(l labels.Labels) { + t.mtx.Lock() + defer t.mtx.Unlock() + t.discoveredLabels = l +} + +// URL returns a copy of the target's URL. +func (t *Target) URL() *url.URL { + params := url.Values{} + + for k, v := range t.params { + params[k] = make([]string, len(v)) + copy(params[k], v) + } + for _, l := range t.labels { + if !strings.HasPrefix(l.Name, model.ParamLabelPrefix) { + continue + } + ks := l.Name[len(model.ParamLabelPrefix):] + + if len(params[ks]) > 0 { + params[ks][0] = l.Value + } else { + params[ks] = []string{l.Value} + } + } + + return &url.URL{ + Scheme: t.labels.Get(model.SchemeLabel), + Host: t.labels.Get(model.AddressLabel), + Path: t.labels.Get(model.MetricsPathLabel), + RawQuery: params.Encode(), + } +} + +// Report sets target data about the last scrape. +func (t *Target) Report(start time.Time, dur time.Duration, err error) { + t.mtx.Lock() + defer t.mtx.Unlock() + + if err == nil { + t.health = HealthGood + } else { + t.health = HealthBad + } + + t.lastError = err + t.lastScrape = start + t.lastScrapeDuration = dur +} + +// LastError returns the error encountered during the last scrape. +func (t *Target) LastError() error { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastError +} + +// LastScrape returns the time of the last scrape. +func (t *Target) LastScrape() time.Time { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastScrape +} + +// LastScrapeDuration returns how long the last scrape of the target took. +func (t *Target) LastScrapeDuration() time.Duration { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.lastScrapeDuration +} + +// Health returns the last known health state of the target. +func (t *Target) Health() TargetHealth { + t.mtx.RLock() + defer t.mtx.RUnlock() + + return t.health +} + +// intervalAndTimeout returns the interval and timeout derived from +// the targets labels. 
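+// Editorial note: the values are read from the __scrape_interval__ and
+// __scrape_timeout__ labels, which sync seeds from the config defaults, so
+// relabeling can override them per target.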
+func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Duration) (time.Duration, time.Duration, error) { + t.mtx.RLock() + defer t.mtx.RUnlock() + + intervalLabel := t.labels.Get(model.ScrapeIntervalLabel) + interval, err := model.ParseDuration(intervalLabel) + if err != nil { + return defaultInterval, defaultDuration, errors.Errorf("Error parsing interval label %q: %v", intervalLabel, err) + } + timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel) + timeout, err := model.ParseDuration(timeoutLabel) + if err != nil { + return defaultInterval, defaultDuration, errors.Errorf("Error parsing timeout label %q: %v", timeoutLabel, err) + } + + return time.Duration(interval), time.Duration(timeout), nil +} + +// GetValue gets a label value from the entire label set. +func (t *Target) GetValue(name string) string { + return t.labels.Get(name) +} + +// Targets is a sortable list of targets. +type Targets []*Target + +func (ts Targets) Len() int { return len(ts) } +func (ts Targets) Less(i, j int) bool { return ts[i].URL().String() < ts[j].URL().String() } +func (ts Targets) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } + +var errSampleLimit = errors.New("sample limit exceeded") + +// limitAppender limits the number of total appended samples in a batch. +type limitAppender struct { + storage.Appender + + limit int + i int +} + +func (app *limitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if !value.IsStaleNaN(v) { + app.i++ + if app.i > app.limit { + return 0, errSampleLimit + } + } + ref, err := app.Appender.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, nil +} + +type timeLimitAppender struct { + storage.Appender + + maxTime int64 +} + +func (app *timeLimitAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { + if t > app.maxTime { + return 0, storage.ErrOutOfBounds + } + + ref, err := app.Appender.Append(ref, lset, t, v) + if err != nil { + return 0, err + } + return ref, nil +} + +// PopulateLabels builds a label set from the given label set and scrape configuration. +// It returns a label set before relabeling was applied as the second return value. +// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. +func PopulateLabels(lset labels.Labels, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { + // Copy labels into the labelset for the target if they are not set already. + scrapeLabels := []labels.Label{ + {Name: model.JobLabel, Value: cfg.JobName}, + {Name: model.ScrapeIntervalLabel, Value: cfg.ScrapeInterval.String()}, + {Name: model.ScrapeTimeoutLabel, Value: cfg.ScrapeTimeout.String()}, + {Name: model.MetricsPathLabel, Value: cfg.MetricsPath}, + {Name: model.SchemeLabel, Value: cfg.Scheme}, + } + lb := labels.NewBuilder(lset) + + for _, l := range scrapeLabels { + if lv := lset.Get(l.Name); lv == "" { + lb.Set(l.Name, l.Value) + } + } + // Encode scrape query parameters as labels. + for k, v := range cfg.Params { + if len(v) > 0 { + lb.Set(model.ParamLabelPrefix+k, v[0]) + } + } + + preRelabelLabels := lb.Labels() + lset = relabel.Process(preRelabelLabels, cfg.RelabelConfigs...) + + // Check if the target was dropped. 
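+ // (Editorial note: relabel.Process returns nil when a drop action
+ // matched; the pre-relabel labels are still returned so callers can show
+ // dropped targets with their discovered labels.)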
+ if lset == nil { + return nil, preRelabelLabels, nil + } + if v := lset.Get(model.AddressLabel); v == "" { + return nil, nil, errors.New("no address") + } + + lb = labels.NewBuilder(lset) + + // addPort checks whether we should add a default port to the address. + // If the address is not valid, we don't append a port either. + addPort := func(s string) bool { + // If we can split, a port exists and we don't have to add one. + if _, _, err := net.SplitHostPort(s); err == nil { + return false + } + // If adding a port makes it valid, the previous error + // was not due to an invalid address and we can append a port. + _, _, err := net.SplitHostPort(s + ":1234") + return err == nil + } + addr := lset.Get(model.AddressLabel) + // If it's an address with no trailing port, infer it based on the used scheme. + if addPort(addr) { + // Addresses reaching this point are already wrapped in [] if necessary. + switch lset.Get(model.SchemeLabel) { + case "http", "": + addr = addr + ":80" + case "https": + addr = addr + ":443" + default: + return nil, nil, errors.Errorf("invalid scheme: %q", cfg.Scheme) + } + lb.Set(model.AddressLabel, addr) + } + + if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { + return nil, nil, err + } + + var interval string + var intervalDuration model.Duration + if interval = lset.Get(model.ScrapeIntervalLabel); interval != cfg.ScrapeInterval.String() { + intervalDuration, err = model.ParseDuration(interval) + if err != nil { + return nil, nil, errors.Errorf("error parsing scrape interval: %v", err) + } + if time.Duration(intervalDuration) == 0 { + return nil, nil, errors.New("scrape interval cannot be 0") + } + } + + var timeout string + var timeoutDuration model.Duration + if timeout = lset.Get(model.ScrapeTimeoutLabel); timeout != cfg.ScrapeTimeout.String() { + timeoutDuration, err = model.ParseDuration(timeout) + if err != nil { + return nil, nil, errors.Errorf("error parsing scrape timeout: %v", err) + } + if time.Duration(timeoutDuration) == 0 { + return nil, nil, errors.New("scrape timeout cannot be 0") + } + } + + if timeoutDuration > intervalDuration { + return nil, nil, errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + } + + // Meta labels are deleted after relabelling. Other internal labels propagate to + // the target which decides whether they will be part of their label set. + for _, l := range lset { + if strings.HasPrefix(l.Name, model.MetaLabelPrefix) { + lb.Del(l.Name) + } + } + + // Default the instance label to the target address. + if v := lset.Get(model.InstanceLabel); v == "" { + lb.Set(model.InstanceLabel, addr) + } + + res = lb.Labels() + for _, l := range res { + // Check label values are valid, drop the target if not. + if !model.LabelValue(l.Value).IsValid() { + return nil, nil, errors.Errorf("invalid label value for %q: %q", l.Name, l.Value) + } + } + return res, preRelabelLabels, nil +} + +// TargetsFromGroup builds targets based on the given TargetGroup and config. 
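+// Editorial sketch (labels are illustrative): for a group with
+//   Targets: [{__address__: "10.0.0.1:9100"}]  Labels: {env: "prod"}
+// the per-target labels win on conflict, the merged set goes through
+// PopulateLabels, and a dropped target is still added with only its
+// original labels so it remains visible in service discovery.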
+func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Target, []error) { + targets := make([]*Target, 0, len(tg.Targets)) + failures := []error{} + + for i, tlset := range tg.Targets { + lbls := make([]labels.Label, 0, len(tlset)+len(tg.Labels)) + + for ln, lv := range tlset { + lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + } + for ln, lv := range tg.Labels { + if _, ok := tlset[ln]; !ok { + lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)}) + } + } + + lset := labels.New(lbls...) + + lbls, origLabels, err := PopulateLabels(lset, cfg) + if err != nil { + failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) + } + if lbls != nil || origLabels != nil { + targets = append(targets, NewTarget(lbls, origLabels, cfg.Params)) + } + } + return targets, failures +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go b/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go new file mode 100644 index 00000000000..670a16834f1 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/chunked.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bufio" + "encoding/binary" + "hash" + "hash/crc32" + "io" + "net/http" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +// DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows. +// 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset. +const DefaultChunkedReadLimit = 5e+7 + +// The table gets initialized with sync.Once but may still cause a race +// with any other use of the crc32 package anywhere. Thus we initialize it +// before. +var castagnoliTable *crc32.Table + +func init() { + castagnoliTable = crc32.MakeTable(crc32.Castagnoli) +} + +// ChunkedWriter is an io.Writer wrapper that allows streaming by adding uvarint delimiter before each write in a form +// of length of the corresponded byte array. +type ChunkedWriter struct { + writer io.Writer + flusher http.Flusher + + crc32 hash.Hash32 +} + +// NewChunkedWriter constructs a ChunkedWriter. +func NewChunkedWriter(w io.Writer, f http.Flusher) *ChunkedWriter { + return &ChunkedWriter{writer: w, flusher: f, crc32: crc32.New(castagnoliTable)} +} + +// Write writes given bytes to the stream and flushes it. +// Each frame includes: +// +// 1. uvarint for the size of the data frame. +// 2. big-endian uint32 for the Castagnoli polynomial CRC-32 checksum of the data frame. +// 3. the bytes of the given data. +// +// Write returns number of sent bytes for a given buffer. The number does not include delimiter and checksum bytes. 
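+// Editorial worked example: for b = []byte("abc") the stream carries one
+// uvarint length byte 0x03, four big-endian bytes of the Castagnoli CRC-32
+// of "abc", and then the three payload bytes; Write reports n = 3.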
+func (w *ChunkedWriter) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + + var buf [binary.MaxVarintLen64]byte + v := binary.PutUvarint(buf[:], uint64(len(b))) + if _, err := w.writer.Write(buf[:v]); err != nil { + return 0, err + } + + w.crc32.Reset() + if _, err := w.crc32.Write(b); err != nil { + return 0, err + } + + if err := binary.Write(w.writer, binary.BigEndian, w.crc32.Sum32()); err != nil { + return 0, err + } + + n, err := w.writer.Write(b) + if err != nil { + return n, err + } + + w.flusher.Flush() + return n, nil +} + +// ChunkedReader is a buffered reader that expects uvarint delimiter and checksum before each message. +// It will allocate as much as the biggest frame defined by delimiter (on top of bufio.Reader allocations). +type ChunkedReader struct { + b *bufio.Reader + data []byte + sizeLimit uint64 + + crc32 hash.Hash32 +} + +// NewChunkedReader constructs a ChunkedReader. +// It allows passing data slice for byte slice reuse, which will be increased to needed size if smaller. +func NewChunkedReader(r io.Reader, sizeLimit uint64, data []byte) *ChunkedReader { + return &ChunkedReader{b: bufio.NewReader(r), sizeLimit: sizeLimit, data: data, crc32: crc32.New(castagnoliTable)} +} + +// Next returns the next length-delimited record from the input, or io.EOF if +// there are no more records available. Returns io.ErrUnexpectedEOF if a short +// record is found, with a length of n but fewer than n bytes of data. +// Next also verifies the given checksum with Castagnoli polynomial CRC-32 checksum. +// +// NOTE: The slice returned is valid only until a subsequent call to Next. It's a caller's responsibility to copy the +// returned slice if needed. +func (r *ChunkedReader) Next() ([]byte, error) { + size, err := binary.ReadUvarint(r.b) + if err != nil { + return nil, err + } + + if size > r.sizeLimit { + return nil, errors.Errorf("chunkedReader: message size exceeded the limit %v bytes; got: %v bytes", r.sizeLimit, size) + } + + if cap(r.data) < int(size) { + r.data = make([]byte, size) + } else { + r.data = r.data[:size] + } + + var crc32 uint32 + if err := binary.Read(r.b, binary.BigEndian, &crc32); err != nil { + return nil, err + } + + r.crc32.Reset() + if _, err := io.ReadFull(io.TeeReader(r.b, r.crc32), r.data); err != nil { + return nil, err + } + + if r.crc32.Sum32() != crc32 { + return nil, errors.New("chunkedReader: corrupted frame; checksum mismatch") + } + return r.data, nil +} + +// NextProto consumes the next available record by calling r.Next, and decodes +// it into the protobuf with proto.Unmarshal. +func (r *ChunkedReader) NextProto(pb proto.Message) error { + rec, err := r.Next() + if err != nil { + return err + } + return proto.Unmarshal(rec, pb) +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go new file mode 100644 index 00000000000..7539c2c92e7 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -0,0 +1,357 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/opentracing-contrib/go-stdlib/nethttp" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/sigv4" + "github.com/prometheus/common/version" + + "github.com/prometheus/prometheus/prompb" +) + +const maxErrMsgLen = 1024 + +var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) + +var ( + remoteReadQueriesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "read_queries_total", + Help: "The total number of remote read queries.", + }, + []string{remoteName, endpoint, "code"}, + ) + remoteReadQueries = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "remote_read_queries", + Help: "The number of in-flight remote read queries.", + }, + []string{remoteName, endpoint}, + ) + remoteReadQueryDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "read_request_duration_seconds", + Help: "Histogram of the latency for remote read requests.", + Buckets: append(prometheus.DefBuckets, 25, 60), + }, + []string{remoteName, endpoint}, + ) +) + +func init() { + prometheus.MustRegister(remoteReadQueriesTotal, remoteReadQueries, remoteReadQueryDuration) +} + +// Client allows reading and writing from/to a remote HTTP endpoint. +type Client struct { + remoteName string // Used to differentiate clients in metrics. + url *config_util.URL + Client *http.Client + timeout time.Duration + + retryOnRateLimit bool + + readQueries prometheus.Gauge + readQueriesTotal *prometheus.CounterVec + readQueriesDuration prometheus.Observer +} + +// ClientConfig configures a client. +type ClientConfig struct { + URL *config_util.URL + Timeout model.Duration + HTTPClientConfig config_util.HTTPClientConfig + SigV4Config *sigv4.SigV4Config + Headers map[string]string + RetryOnRateLimit bool +} + +// ReadClient uses the SAMPLES method of remote read to read series samples from remote server. +// TODO(bwplotka): Add streamed chunked remote read method as well (https://github.com/prometheus/prometheus/issues/5926). +type ReadClient interface { + Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) +} + +// NewReadClient creates a new client for remote read. 
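+// Editorial usage sketch (names are illustrative):
+//   c, err := NewReadClient("tempo", &ClientConfig{
+//           URL:     u, // *config_util.URL of the remote read endpoint
+//           Timeout: model.Duration(30 * time.Second),
+//   })
+//   res, err := c.Read(ctx, query) // *prompb.QueryResult for one query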
+func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) { + httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_read_client") + if err != nil { + return nil, err + } + + t := httpClient.Transport + if len(conf.Headers) > 0 { + t = newInjectHeadersRoundTripper(conf.Headers, t) + } + httpClient.Transport = &nethttp.Transport{ + RoundTripper: t, + } + + return &Client{ + remoteName: name, + url: conf.URL, + Client: httpClient, + timeout: time.Duration(conf.Timeout), + readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()), + readQueriesTotal: remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}), + readQueriesDuration: remoteReadQueryDuration.WithLabelValues(name, conf.URL.String()), + }, nil +} + +// NewWriteClient creates a new client for remote write. +func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { + httpClient, err := config_util.NewClientFromConfig(conf.HTTPClientConfig, "remote_storage_write_client") + if err != nil { + return nil, err + } + t := httpClient.Transport + + if conf.SigV4Config != nil { + t, err = sigv4.NewSigV4RoundTripper(conf.SigV4Config, httpClient.Transport) + if err != nil { + return nil, err + } + } + + if len(conf.Headers) > 0 { + t = newInjectHeadersRoundTripper(conf.Headers, t) + } + + httpClient.Transport = &nethttp.Transport{ + RoundTripper: t, + } + + return &Client{ + remoteName: name, + url: conf.URL, + Client: httpClient, + retryOnRateLimit: conf.RetryOnRateLimit, + timeout: time.Duration(conf.Timeout), + }, nil +} + +func newInjectHeadersRoundTripper(h map[string]string, underlyingRT http.RoundTripper) *injectHeadersRoundTripper { + return &injectHeadersRoundTripper{headers: h, RoundTripper: underlyingRT} +} + +type injectHeadersRoundTripper struct { + headers map[string]string + http.RoundTripper +} + +func (t *injectHeadersRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + for key, value := range t.headers { + req.Header.Set(key, value) + } + return t.RoundTripper.RoundTrip(req) +} + +const defaultBackoff = 0 + +type RecoverableError struct { + error + retryAfter model.Duration +} + +// Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled +// and encoded bytes from codec.go. +func (c *Client) Store(ctx context.Context, req []byte) error { + httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(req)) + if err != nil { + // Errors from NewRequest are from unparsable URLs, so are not + // recoverable. + return err + } + + httpReq.Header.Add("Content-Encoding", "snappy") + httpReq.Header.Set("Content-Type", "application/x-protobuf") + httpReq.Header.Set("User-Agent", UserAgent) + httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + + httpReq = httpReq.WithContext(ctx) + + if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { + var ht *nethttp.Tracer + httpReq, ht = nethttp.TraceRequest( + parentSpan.Tracer(), + httpReq, + nethttp.OperationName("Remote Store"), + nethttp.ClientTrace(false), + ) + defer ht.Finish() + } + + httpResp, err := c.Client.Do(httpReq) + if err != nil { + // Errors from Client.Do are from (for example) network errors, so are + // recoverable. 
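+ // (Editorial note: RecoverableError tells the caller the write may be
+ // retried; below, 5xx responses use defaultBackoff and, when enabled,
+ // 429s carry the server's Retry-After duration.)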
+ return RecoverableError{err, defaultBackoff} + } + defer func() { + io.Copy(ioutil.Discard, httpResp.Body) + httpResp.Body.Close() + }() + + if httpResp.StatusCode/100 != 2 { + scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen)) + line := "" + if scanner.Scan() { + line = scanner.Text() + } + err = errors.Errorf("server returned HTTP status %s: %s", httpResp.Status, line) + } + if httpResp.StatusCode/100 == 5 { + return RecoverableError{err, defaultBackoff} + } + if c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests { + return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} + } + return err +} + +// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it +// returns the defaultBackoff as if the header was never supplied. +func retryAfterDuration(t string) model.Duration { + parsedDuration, err := time.Parse(http.TimeFormat, t) + if err == nil { + s := time.Until(parsedDuration).Seconds() + return model.Duration(s) * model.Duration(time.Second) + } + // The duration can be in seconds. + d, err := strconv.Atoi(t) + if err != nil { + return defaultBackoff + } + return model.Duration(d) * model.Duration(time.Second) +} + +// Name uniquely identifies the client. +func (c Client) Name() string { + return c.remoteName +} + +// Endpoint is the remote read or write endpoint. +func (c Client) Endpoint() string { + return c.url.String() +} + +// Read reads from a remote endpoint. +func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) { + c.readQueries.Inc() + defer c.readQueries.Dec() + + req := &prompb.ReadRequest{ + // TODO: Support batching multiple queries into one read request, + // as the protobuf interface allows for it. + Queries: []*prompb.Query{ + query, + }, + } + data, err := proto.Marshal(req) + if err != nil { + return nil, errors.Wrapf(err, "unable to marshal read request") + } + + compressed := snappy.Encode(nil, data) + httpReq, err := http.NewRequest("POST", c.url.String(), bytes.NewReader(compressed)) + if err != nil { + return nil, errors.Wrap(err, "unable to create request") + } + httpReq.Header.Add("Content-Encoding", "snappy") + httpReq.Header.Add("Accept-Encoding", "snappy") + httpReq.Header.Set("Content-Type", "application/x-protobuf") + httpReq.Header.Set("User-Agent", UserAgent) + httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") + + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + + httpReq = httpReq.WithContext(ctx) + + if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { + var ht *nethttp.Tracer + httpReq, ht = nethttp.TraceRequest( + parentSpan.Tracer(), + httpReq, + nethttp.OperationName("Remote Read"), + nethttp.ClientTrace(false), + ) + defer ht.Finish() + } + + start := time.Now() + httpResp, err := c.Client.Do(httpReq) + if err != nil { + return nil, errors.Wrap(err, "error sending request") + } + defer func() { + io.Copy(ioutil.Discard, httpResp.Body) + httpResp.Body.Close() + }() + c.readQueriesDuration.Observe(time.Since(start).Seconds()) + c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc() + + compressed, err = ioutil.ReadAll(httpResp.Body) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("error reading response. 
HTTP status code: %s", httpResp.Status)) + } + + if httpResp.StatusCode/100 != 2 { + return nil, errors.Errorf("remote server %s returned HTTP status %s: %s", c.url.String(), httpResp.Status, strings.TrimSpace(string(compressed))) + } + + uncompressed, err := snappy.Decode(nil, compressed) + if err != nil { + return nil, errors.Wrap(err, "error reading response") + } + + var resp prompb.ReadResponse + err = proto.Unmarshal(uncompressed, &resp) + if err != nil { + return nil, errors.Wrap(err, "unable to unmarshal response body") + } + + if len(resp.Results) != len(req.Queries) { + return nil, errors.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results)) + } + + return resp.Results[0], nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go new file mode 100644 index 00000000000..a9f6af0f38d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -0,0 +1,532 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "sort" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/textparse" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +// decodeReadLimit is the maximum size of a read request body in bytes. +const decodeReadLimit = 32 * 1024 * 1024 + +type HTTPError struct { + msg string + status int +} + +func (e HTTPError) Error() string { + return e.msg +} + +func (e HTTPError) Status() int { + return e.status +} + +// DecodeReadRequest reads a remote.Request from a http.Request. +func DecodeReadRequest(r *http.Request) (*prompb.ReadRequest, error) { + compressed, err := ioutil.ReadAll(io.LimitReader(r.Body, decodeReadLimit)) + if err != nil { + return nil, err + } + + reqBuf, err := snappy.Decode(nil, compressed) + if err != nil { + return nil, err + } + + var req prompb.ReadRequest + if err := proto.Unmarshal(reqBuf, &req); err != nil { + return nil, err + } + + return &req, nil +} + +// EncodeReadResponse writes a remote.Response to a http.ResponseWriter. +func EncodeReadResponse(resp *prompb.ReadResponse, w http.ResponseWriter) error { + data, err := proto.Marshal(resp) + if err != nil { + return err + } + + compressed := snappy.Encode(nil, data) + _, err = w.Write(compressed) + return err +} + +// ToQuery builds a Query proto. 
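+// It encodes the query time range, label matchers and (optional) select
+// hints into the prompb form used by the remote read protocol.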
+func ToQuery(from, to int64, matchers []*labels.Matcher, hints *storage.SelectHints) (*prompb.Query, error) { + ms, err := toLabelMatchers(matchers) + if err != nil { + return nil, err + } + + var rp *prompb.ReadHints + if hints != nil { + rp = &prompb.ReadHints{ + StartMs: hints.Start, + EndMs: hints.End, + StepMs: hints.Step, + Func: hints.Func, + Grouping: hints.Grouping, + By: hints.By, + RangeMs: hints.Range, + } + } + + return &prompb.Query{ + StartTimestampMs: from, + EndTimestampMs: to, + Matchers: ms, + Hints: rp, + }, nil +} + +// ToQueryResult builds a QueryResult proto. +func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult, storage.Warnings, error) { + numSamples := 0 + resp := &prompb.QueryResult{} + for ss.Next() { + series := ss.At() + iter := series.Iterator() + samples := []prompb.Sample{} + + for iter.Next() { + numSamples++ + if sampleLimit > 0 && numSamples > sampleLimit { + return nil, ss.Warnings(), HTTPError{ + msg: fmt.Sprintf("exceeded sample limit (%d)", sampleLimit), + status: http.StatusBadRequest, + } + } + ts, val := iter.At() + samples = append(samples, prompb.Sample{ + Timestamp: ts, + Value: val, + }) + } + if err := iter.Err(); err != nil { + return nil, ss.Warnings(), err + } + + resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{ + Labels: labelsToLabelsProto(series.Labels(), nil), + Samples: samples, + }) + } + return resp, ss.Warnings(), ss.Err() +} + +// FromQueryResult unpacks and sorts a QueryResult proto. +func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet { + series := make([]storage.Series, 0, len(res.Timeseries)) + for _, ts := range res.Timeseries { + lbls := labelProtosToLabels(ts.Labels) + if err := validateLabelsAndMetricName(lbls); err != nil { + return errSeriesSet{err: err} + } + series = append(series, &concreteSeries{labels: lbls, samples: ts.Samples}) + } + + if sortSeries { + sort.Sort(byLabel(series)) + } + return &concreteSeriesSet{ + series: series, + } +} + +// NegotiateResponseType returns first accepted response type that this server supports. +// On the empty accepted list we assume that the SAMPLES response type was requested. This is to maintain backward compatibility. +func NegotiateResponseType(accepted []prompb.ReadRequest_ResponseType) (prompb.ReadRequest_ResponseType, error) { + if len(accepted) == 0 { + accepted = []prompb.ReadRequest_ResponseType{prompb.ReadRequest_SAMPLES} + } + + supported := map[prompb.ReadRequest_ResponseType]struct{}{ + prompb.ReadRequest_SAMPLES: {}, + prompb.ReadRequest_STREAMED_XOR_CHUNKS: {}, + } + + for _, resType := range accepted { + if _, ok := supported[resType]; ok { + return resType, nil + } + } + return 0, errors.Errorf("server does not support any of the requested response types: %v; supported: %v", accepted, supported) +} + +// StreamChunkedReadResponses iterates over series, builds chunks and streams those to the caller. +// It expects Series set with populated chunks. 
+func StreamChunkedReadResponses( + stream io.Writer, + queryIndex int64, + ss storage.ChunkSeriesSet, + sortedExternalLabels []prompb.Label, + maxBytesInFrame int, +) (storage.Warnings, error) { + var ( + chks []prompb.Chunk + lbls []prompb.Label + ) + + for ss.Next() { + series := ss.At() + iter := series.Iterator() + lbls = MergeLabels(labelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels) + + frameBytesLeft := maxBytesInFrame + for _, lbl := range lbls { + frameBytesLeft -= lbl.Size() + } + + isNext := iter.Next() + + // Send at most one series per frame; series may be split over multiple frames according to maxBytesInFrame. + for isNext { + chk := iter.At() + + if chk.Chunk == nil { + return ss.Warnings(), errors.Errorf("StreamChunkedReadResponses: found not populated chunk returned by SeriesSet at ref: %v", chk.Ref) + } + + // Cut the chunk. + chks = append(chks, prompb.Chunk{ + MinTimeMs: chk.MinTime, + MaxTimeMs: chk.MaxTime, + Type: prompb.Chunk_Encoding(chk.Chunk.Encoding()), + Data: chk.Chunk.Bytes(), + }) + frameBytesLeft -= chks[len(chks)-1].Size() + + // We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size. + isNext = iter.Next() + if frameBytesLeft > 0 && isNext { + continue + } + + b, err := proto.Marshal(&prompb.ChunkedReadResponse{ + ChunkedSeries: []*prompb.ChunkedSeries{ + {Labels: lbls, Chunks: chks}, + }, + QueryIndex: queryIndex, + }) + if err != nil { + return ss.Warnings(), errors.Wrap(err, "marshal ChunkedReadResponse") + } + + if _, err := stream.Write(b); err != nil { + return ss.Warnings(), errors.Wrap(err, "write to stream") + } + chks = chks[:0] + } + if err := iter.Err(); err != nil { + return ss.Warnings(), err + } + } + return ss.Warnings(), ss.Err() +} + +// MergeLabels merges two sets of sorted proto labels, preferring those in +// primary to those in secondary when there is an overlap. +func MergeLabels(primary, secondary []prompb.Label) []prompb.Label { + result := make([]prompb.Label, 0, len(primary)+len(secondary)) + i, j := 0, 0 + for i < len(primary) && j < len(secondary) { + if primary[i].Name < secondary[j].Name { + result = append(result, primary[i]) + i++ + } else if primary[i].Name > secondary[j].Name { + result = append(result, secondary[j]) + j++ + } else { + result = append(result, primary[i]) + i++ + j++ + } + } + for ; i < len(primary); i++ { + result = append(result, primary[i]) + } + for ; j < len(secondary); j++ { + result = append(result, secondary[j]) + } + return result +} + +type byLabel []storage.Series + +func (a byLabel) Len() int { return len(a) } +func (a byLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } + +// errSeriesSet implements storage.SeriesSet, just returning an error. +type errSeriesSet struct { + err error +} + +func (errSeriesSet) Next() bool { + return false +} + +func (errSeriesSet) At() storage.Series { + return nil +} + +func (e errSeriesSet) Err() error { + return e.err +} + +func (e errSeriesSet) Warnings() storage.Warnings { return nil } + +// concreteSeriesSet implements storage.SeriesSet. 
+type concreteSeriesSet struct { + cur int + series []storage.Series +} + +func (c *concreteSeriesSet) Next() bool { + c.cur++ + return c.cur-1 < len(c.series) +} + +func (c *concreteSeriesSet) At() storage.Series { + return c.series[c.cur-1] +} + +func (c *concreteSeriesSet) Err() error { + return nil +} + +func (c *concreteSeriesSet) Warnings() storage.Warnings { return nil } + +// concreteSeries implements storage.Series. +type concreteSeries struct { + labels labels.Labels + samples []prompb.Sample +} + +func (c *concreteSeries) Labels() labels.Labels { + return labels.New(c.labels...) +} + +func (c *concreteSeries) Iterator() chunkenc.Iterator { + return newConcreteSeriersIterator(c) +} + +// concreteSeriesIterator implements storage.SeriesIterator. +type concreteSeriesIterator struct { + cur int + series *concreteSeries +} + +func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator { + return &concreteSeriesIterator{ + cur: -1, + series: series, + } +} + +// Seek implements storage.SeriesIterator. +func (c *concreteSeriesIterator) Seek(t int64) bool { + c.cur = sort.Search(len(c.series.samples), func(n int) bool { + return c.series.samples[n].Timestamp >= t + }) + return c.cur < len(c.series.samples) +} + +// At implements storage.SeriesIterator. +func (c *concreteSeriesIterator) At() (t int64, v float64) { + s := c.series.samples[c.cur] + return s.Timestamp, s.Value +} + +// Next implements storage.SeriesIterator. +func (c *concreteSeriesIterator) Next() bool { + c.cur++ + return c.cur < len(c.series.samples) +} + +// Err implements storage.SeriesIterator. +func (c *concreteSeriesIterator) Err() error { + return nil +} + +// validateLabelsAndMetricName validates the label names/values and metric names returned from remote read, +// also making sure that there are no labels with duplicate names +func validateLabelsAndMetricName(ls labels.Labels) error { + for i, l := range ls { + if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return errors.Errorf("invalid metric name: %v", l.Value) + } + if !model.LabelName(l.Name).IsValid() { + return errors.Errorf("invalid label name: %v", l.Name) + } + if !model.LabelValue(l.Value).IsValid() { + return errors.Errorf("invalid label value: %v", l.Value) + } + if i > 0 && l.Name == ls[i-1].Name { + return errors.Errorf("duplicate label with name: %v", l.Name) + } + } + return nil +} + +func toLabelMatchers(matchers []*labels.Matcher) ([]*prompb.LabelMatcher, error) { + pbMatchers := make([]*prompb.LabelMatcher, 0, len(matchers)) + for _, m := range matchers { + var mType prompb.LabelMatcher_Type + switch m.Type { + case labels.MatchEqual: + mType = prompb.LabelMatcher_EQ + case labels.MatchNotEqual: + mType = prompb.LabelMatcher_NEQ + case labels.MatchRegexp: + mType = prompb.LabelMatcher_RE + case labels.MatchNotRegexp: + mType = prompb.LabelMatcher_NRE + default: + return nil, errors.New("invalid matcher type") + } + pbMatchers = append(pbMatchers, &prompb.LabelMatcher{ + Type: mType, + Name: m.Name, + Value: m.Value, + }) + } + return pbMatchers, nil +} + +// FromLabelMatchers parses protobuf label matchers to Prometheus label matchers. 
+func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, error) { + result := make([]*labels.Matcher, 0, len(matchers)) + for _, matcher := range matchers { + var mtype labels.MatchType + switch matcher.Type { + case prompb.LabelMatcher_EQ: + mtype = labels.MatchEqual + case prompb.LabelMatcher_NEQ: + mtype = labels.MatchNotEqual + case prompb.LabelMatcher_RE: + mtype = labels.MatchRegexp + case prompb.LabelMatcher_NRE: + mtype = labels.MatchNotRegexp + default: + return nil, errors.New("invalid matcher type") + } + matcher, err := labels.NewMatcher(mtype, matcher.Name, matcher.Value) + if err != nil { + return nil, err + } + result = append(result, matcher) + } + return result, nil +} + +func exemplarProtoToExemplar(ep prompb.Exemplar) exemplar.Exemplar { + timestamp := ep.Timestamp + + return exemplar.Exemplar{ + Labels: labelProtosToLabels(ep.Labels), + Value: ep.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} + +// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric +func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { + metric := make(model.Metric, len(labelPairs)) + for _, l := range labelPairs { + metric[model.LabelName(l.Name)] = model.LabelValue(l.Value) + } + return metric +} + +func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels { + result := make(labels.Labels, 0, len(labelPairs)) + for _, l := range labelPairs { + result = append(result, labels.Label{ + Name: l.Name, + Value: l.Value, + }) + } + sort.Sort(result) + return result +} + +// labelsToLabelsProto transforms labels into prompb labels. The buffer slice +// will be used to avoid allocations if it is big enough to store the labels. +func labelsToLabelsProto(labels labels.Labels, buf []prompb.Label) []prompb.Label { + result := buf[:0] + if cap(buf) < len(labels) { + result = make([]prompb.Label, 0, len(labels)) + } + for _, l := range labels { + result = append(result, prompb.Label{ + Name: l.Name, + Value: l.Value, + }) + } + return result +} + +// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. +func metricTypeToMetricTypeProto(t textparse.MetricType) prompb.MetricMetadata_MetricType { + mt := strings.ToUpper(string(t)) + v, ok := prompb.MetricMetadata_MetricType_value[mt] + if !ok { + return prompb.MetricMetadata_UNKNOWN + } + + return prompb.MetricMetadata_MetricType(v) +} + +// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling +// snappy decompression. +func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) { + compressed, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + reqBuf, err := snappy.Decode(nil, compressed) + if err != nil { + return nil, err + } + + var req prompb.WriteRequest + if err := proto.Unmarshal(reqBuf, &req); err != nil { + return nil, err + } + + return &req, nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/ewma.go b/vendor/github.com/prometheus/prometheus/storage/remote/ewma.go new file mode 100644 index 00000000000..c7fb0289b04 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/ewma.go @@ -0,0 +1,69 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "sync"
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+// ewmaRate tracks an exponentially weighted moving average of a per-second rate.
+type ewmaRate struct {
+ newEvents atomic.Int64
+
+ alpha float64
+ interval time.Duration
+ lastRate float64
+ init bool
+ mutex sync.Mutex
+}
+
+// newEWMARate always allocates a new ewmaRate, as this guarantees the atomically
+// accessed int64 will be aligned on ARM. See prometheus#2666.
+func newEWMARate(alpha float64, interval time.Duration) *ewmaRate {
+ return &ewmaRate{
+ alpha: alpha,
+ interval: interval,
+ }
+}
+
+// rate returns the per-second rate.
+func (r *ewmaRate) rate() float64 {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.lastRate
+}
+
+// tick assumes it is called every r.interval.
+func (r *ewmaRate) tick() {
+ newEvents := r.newEvents.Swap(0)
+ instantRate := float64(newEvents) / r.interval.Seconds()
+
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ if r.init {
+ r.lastRate += r.alpha * (instantRate - r.lastRate)
+ } else if newEvents > 0 {
+ r.init = true
+ r.lastRate = instantRate
+ }
+}
+
+// incr adds incr to the event count.
+func (r *ewmaRate) incr(incr int64) {
+ r.newEvents.Add(incr)
+}
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/intern.go b/vendor/github.com/prometheus/prometheus/storage/remote/intern.go
new file mode 100644
index 00000000000..23047acd9bb
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/storage/remote/intern.go
@@ -0,0 +1,102 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Inspired / copied / modified from https://gitlab.com/cznic/strutil/blob/master/strutil.go,
+// which is MIT licensed, so:
+//
+// Copyright (c) 2014 The strutil Authors. All rights reserved.
+ +package remote + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/atomic" +) + +var noReferenceReleases = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "string_interner_zero_reference_releases_total", + Help: "The number of times release has been called for strings that are not interned.", +}) + +type pool struct { + mtx sync.RWMutex + pool map[string]*entry +} + +type entry struct { + refs atomic.Int64 + + s string +} + +func newEntry(s string) *entry { + return &entry{s: s} +} + +func newPool() *pool { + return &pool{ + pool: map[string]*entry{}, + } +} + +func (p *pool) intern(s string) string { + if s == "" { + return "" + } + + p.mtx.RLock() + interned, ok := p.pool[s] + p.mtx.RUnlock() + if ok { + interned.refs.Inc() + return interned.s + } + p.mtx.Lock() + defer p.mtx.Unlock() + if interned, ok := p.pool[s]; ok { + interned.refs.Inc() + return interned.s + } + + p.pool[s] = newEntry(s) + p.pool[s].refs.Store(1) + return s +} + +func (p *pool) release(s string) { + p.mtx.RLock() + interned, ok := p.pool[s] + p.mtx.RUnlock() + + if !ok { + noReferenceReleases.Inc() + return + } + + refs := interned.refs.Dec() + if refs > 0 { + return + } + + p.mtx.Lock() + defer p.mtx.Unlock() + if interned.refs.Load() != 0 { + return + } + delete(p.pool, s) +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go b/vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go new file mode 100644 index 00000000000..3a0a6d6fd4b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/max_timestamp.go @@ -0,0 +1,47 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +type maxTimestamp struct { + mtx sync.Mutex + value float64 + prometheus.Gauge +} + +func (m *maxTimestamp) Set(value float64) { + m.mtx.Lock() + defer m.mtx.Unlock() + if value > m.value { + m.value = value + m.Gauge.Set(value) + } +} + +func (m *maxTimestamp) Get() float64 { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.value +} + +func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) { + if m.Get() > 0 { + m.Gauge.Collect(c) + } +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go new file mode 100644 index 00000000000..eee36463b10 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -0,0 +1,164 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/scrape" +) + +// MetadataAppender is an interface used by the Metadata Watcher to send metadata, It is read from the scrape manager, on to somewhere else. +type MetadataAppender interface { + AppendMetadata(context.Context, []scrape.MetricMetadata) +} + +// Watchable represents from where we fetch active targets for metadata. +type Watchable interface { + TargetsActive() map[string][]*scrape.Target +} + +type noopScrapeManager struct{} + +func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { + return nil, errors.New("Scrape manager not ready") +} + +// MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. +type MetadataWatcher struct { + name string + logger log.Logger + + managerGetter ReadyScrapeManager + manager Watchable + writer MetadataAppender + + interval model.Duration + deadline time.Duration + + done chan struct{} + + softShutdownCtx context.Context + softShutdownCancel context.CancelFunc + hardShutdownCancel context.CancelFunc + hardShutdownCtx context.Context +} + +// NewMetadataWatcher builds a new MetadataWatcher. +func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { + if l == nil { + l = log.NewNopLogger() + } + + if mg == nil { + mg = &noopScrapeManager{} + } + + return &MetadataWatcher{ + name: name, + logger: l, + + managerGetter: mg, + writer: w, + + interval: interval, + deadline: deadline, + + done: make(chan struct{}), + } +} + +// Start the MetadataWatcher. +func (mw *MetadataWatcher) Start() { + level.Info(mw.logger).Log("msg", "Starting scraped metadata watcher") + mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background()) + mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx) + go mw.loop() +} + +// Stop the MetadataWatcher. +func (mw *MetadataWatcher) Stop() { + level.Info(mw.logger).Log("msg", "Stopping metadata watcher...") + defer level.Info(mw.logger).Log("msg", "Scraped metadata watcher stopped") + + mw.softShutdownCancel() + select { + case <-mw.done: + return + case <-time.After(mw.deadline): + level.Error(mw.logger).Log("msg", "Failed to flush metadata") + } + + mw.hardShutdownCancel() + <-mw.done +} + +func (mw *MetadataWatcher) loop() { + ticker := time.NewTicker(time.Duration(mw.interval)) + defer ticker.Stop() + defer close(mw.done) + + for { + select { + case <-mw.softShutdownCtx.Done(): + return + case <-ticker.C: + mw.collect() + } + } +} + +func (mw *MetadataWatcher) collect() { + if !mw.ready() { + return + } + + // We create a set of the metadata to help deduplicating based on the attributes of a + // scrape.MetricMetadata. In this case, a combination of metric name, help, type, and unit. 
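+ // scrape.MetricMetadata is a comparable struct, so it can be used
+ // directly as a map key here.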
+ metadataSet := map[scrape.MetricMetadata]struct{}{} + metadata := []scrape.MetricMetadata{} + for _, tset := range mw.manager.TargetsActive() { + for _, target := range tset { + for _, entry := range target.MetadataList() { + if _, ok := metadataSet[entry]; !ok { + metadata = append(metadata, entry) + metadataSet[entry] = struct{}{} + } + } + } + } + + // Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired. + mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata) +} + +func (mw *MetadataWatcher) ready() bool { + if mw.manager != nil { + return true + } + + m, err := mw.managerGetter.Get() + if err != nil { + return false + } + + mw.manager = m + return true +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go new file mode 100644 index 00000000000..b3323051888 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -0,0 +1,1306 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "math" + "strconv" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "go.uber.org/atomic" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/wal" +) + +const ( + // We track samples in/out and how long pushes take using an Exponentially + // Weighted Moving Average. + ewmaWeight = 0.2 + shardUpdateDuration = 10 * time.Second + + // Allow 30% too many shards before scaling down. 
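+ // Desired shard counts within ±30% of the current count do not
+ // trigger a reshard (see calculateDesiredShards).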
+ shardToleranceFraction = 0.3 +) + +type queueManagerMetrics struct { + reg prometheus.Registerer + + samplesTotal prometheus.Counter + exemplarsTotal prometheus.Counter + metadataTotal prometheus.Counter + failedSamplesTotal prometheus.Counter + failedExemplarsTotal prometheus.Counter + failedMetadataTotal prometheus.Counter + retriedSamplesTotal prometheus.Counter + retriedExemplarsTotal prometheus.Counter + retriedMetadataTotal prometheus.Counter + droppedSamplesTotal prometheus.Counter + droppedExemplarsTotal prometheus.Counter + enqueueRetriesTotal prometheus.Counter + sentBatchDuration prometheus.Histogram + highestSentTimestamp *maxTimestamp + pendingSamples prometheus.Gauge + pendingExemplars prometheus.Gauge + shardCapacity prometheus.Gauge + numShards prometheus.Gauge + maxNumShards prometheus.Gauge + minNumShards prometheus.Gauge + desiredNumShards prometheus.Gauge + sentBytesTotal prometheus.Counter + metadataBytesTotal prometheus.Counter + maxSamplesPerSend prometheus.Gauge +} + +func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManagerMetrics { + m := &queueManagerMetrics{ + reg: r, + } + constLabels := prometheus.Labels{ + remoteName: rn, + endpoint: e, + } + + m.samplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_total", + Help: "Total number of samples sent to remote storage.", + ConstLabels: constLabels, + }) + m.exemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_total", + Help: "Total number of exemplars sent to remote storage.", + ConstLabels: constLabels, + }) + m.metadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_total", + Help: "Total number of metadata entries sent to remote storage.", + ConstLabels: constLabels, + }) + m.failedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_failed_total", + Help: "Total number of samples which failed on send to remote storage, non-recoverable errors.", + ConstLabels: constLabels, + }) + m.failedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_failed_total", + Help: "Total number of exemplars which failed on send to remote storage, non-recoverable errors.", + ConstLabels: constLabels, + }) + m.failedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_failed_total", + Help: "Total number of metadata entries which failed on send to remote storage, non-recoverable errors.", + ConstLabels: constLabels, + }) + m.retriedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_retried_total", + Help: "Total number of samples which failed on send to remote storage but were retried because the send error was recoverable.", + ConstLabels: constLabels, + }) + m.retriedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_retried_total", + Help: "Total number of exemplars which failed on send to remote storage but were retried because the send error was recoverable.", + ConstLabels: constLabels, + }) + m.retriedMetadataTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + 
Name: "metadata_retried_total", + Help: "Total number of metadata entries which failed on send to remote storage but were retried because the send error was recoverable.", + ConstLabels: constLabels, + }) + m.droppedSamplesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_dropped_total", + Help: "Total number of samples which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", + ConstLabels: constLabels, + }) + m.droppedExemplarsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_dropped_total", + Help: "Total number of exemplars which were dropped after being read from the WAL before being sent via remote write, either via relabelling or unintentionally because of an unknown reference ID.", + ConstLabels: constLabels, + }) + m.enqueueRetriesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "enqueue_retries_total", + Help: "Total number of times enqueue has failed because a shards queue was full.", + ConstLabels: constLabels, + }) + m.sentBatchDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "sent_batch_duration_seconds", + Help: "Duration of send calls to the remote storage.", + Buckets: append(prometheus.DefBuckets, 25, 60, 120, 300), + ConstLabels: constLabels, + }) + m.highestSentTimestamp = &maxTimestamp{ + Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "queue_highest_sent_timestamp_seconds", + Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.", + ConstLabels: constLabels, + }), + } + m.pendingSamples = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_pending", + Help: "The number of samples pending in the queues shards to be sent to the remote storage.", + ConstLabels: constLabels, + }) + m.pendingExemplars = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_pending", + Help: "The number of exemplars pending in the queues shards to be sent to the remote storage.", + ConstLabels: constLabels, + }) + m.shardCapacity = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shard_capacity", + Help: "The capacity of each shard of the queue used for parallel sending to the remote storage.", + ConstLabels: constLabels, + }) + m.numShards = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shards", + Help: "The number of shards used for parallel sending to the remote storage.", + ConstLabels: constLabels, + }) + m.maxNumShards = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shards_max", + Help: "The maximum number of shards that the queue is allowed to run.", + ConstLabels: constLabels, + }) + m.minNumShards = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "shards_min", + Help: "The minimum number of shards that the queue is allowed to run.", + ConstLabels: constLabels, + }) + m.desiredNumShards = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: 
"shards_desired", + Help: "The number of shards that the queues shard calculation wants to run based on the rate of samples in vs. samples out.", + ConstLabels: constLabels, + }) + m.sentBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "bytes_total", + Help: "The total number of bytes of data (not metadata) sent by the queue after compression. Note that when exemplars over remote write is enabled the exemplars included in a remote write request count towards this metric.", + ConstLabels: constLabels, + }) + m.metadataBytesTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "metadata_bytes_total", + Help: "The total number of bytes of metadata sent by the queue after compression.", + ConstLabels: constLabels, + }) + m.maxSamplesPerSend = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "max_samples_per_send", + Help: "The maximum number of samples to be sent, in a single request, to the remote storage. Note that, when sending of exemplars over remote write is enabled, exemplars count towards this limt.", + ConstLabels: constLabels, + }) + + return m +} + +func (m *queueManagerMetrics) register() { + if m.reg != nil { + m.reg.MustRegister( + m.samplesTotal, + m.exemplarsTotal, + m.metadataTotal, + m.failedSamplesTotal, + m.failedExemplarsTotal, + m.failedMetadataTotal, + m.retriedSamplesTotal, + m.retriedExemplarsTotal, + m.retriedMetadataTotal, + m.droppedSamplesTotal, + m.droppedExemplarsTotal, + m.enqueueRetriesTotal, + m.sentBatchDuration, + m.highestSentTimestamp, + m.pendingSamples, + m.pendingExemplars, + m.shardCapacity, + m.numShards, + m.maxNumShards, + m.minNumShards, + m.desiredNumShards, + m.sentBytesTotal, + m.metadataBytesTotal, + m.maxSamplesPerSend, + ) + } +} + +func (m *queueManagerMetrics) unregister() { + if m.reg != nil { + m.reg.Unregister(m.samplesTotal) + m.reg.Unregister(m.exemplarsTotal) + m.reg.Unregister(m.metadataTotal) + m.reg.Unregister(m.failedSamplesTotal) + m.reg.Unregister(m.failedExemplarsTotal) + m.reg.Unregister(m.failedMetadataTotal) + m.reg.Unregister(m.retriedSamplesTotal) + m.reg.Unregister(m.retriedExemplarsTotal) + m.reg.Unregister(m.retriedMetadataTotal) + m.reg.Unregister(m.droppedSamplesTotal) + m.reg.Unregister(m.droppedExemplarsTotal) + m.reg.Unregister(m.enqueueRetriesTotal) + m.reg.Unregister(m.sentBatchDuration) + m.reg.Unregister(m.highestSentTimestamp) + m.reg.Unregister(m.pendingSamples) + m.reg.Unregister(m.pendingExemplars) + m.reg.Unregister(m.shardCapacity) + m.reg.Unregister(m.numShards) + m.reg.Unregister(m.maxNumShards) + m.reg.Unregister(m.minNumShards) + m.reg.Unregister(m.desiredNumShards) + m.reg.Unregister(m.sentBytesTotal) + m.reg.Unregister(m.metadataBytesTotal) + m.reg.Unregister(m.maxSamplesPerSend) + } +} + +// WriteClient defines an interface for sending a batch of samples to an +// external timeseries database. +type WriteClient interface { + // Store stores the given samples in the remote storage. + Store(context.Context, []byte) error + // Name uniquely identifies the remote storage. + Name() string + // Endpoint is the remote read or write endpoint for the storage client. + Endpoint() string +} + +// QueueManager manages a queue of samples to be sent to the Storage +// indicated by the provided WriteClient. Implements writeTo interface +// used by WAL Watcher. 
+type QueueManager struct { + lastSendTimestamp atomic.Int64 + + logger log.Logger + flushDeadline time.Duration + cfg config.QueueConfig + mcfg config.MetadataConfig + externalLabels labels.Labels + relabelConfigs []*relabel.Config + sendExemplars bool + watcher *wal.Watcher + metadataWatcher *MetadataWatcher + + clientMtx sync.RWMutex + storeClient WriteClient + + seriesMtx sync.Mutex // Covers seriesLabels and droppedSeries. + seriesLabels map[chunks.HeadSeriesRef]labels.Labels + droppedSeries map[chunks.HeadSeriesRef]struct{} + + seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first. + seriesSegmentIndexes map[chunks.HeadSeriesRef]int + + shards *shards + numShards int + reshardChan chan int + quit chan struct{} + wg sync.WaitGroup + + dataIn, dataDropped, dataOut, dataOutDuration *ewmaRate + + metrics *queueManagerMetrics + interner *pool + highestRecvTimestamp *maxTimestamp +} + +// NewQueueManager builds a new QueueManager. +func NewQueueManager( + metrics *queueManagerMetrics, + watcherMetrics *wal.WatcherMetrics, + readerMetrics *wal.LiveReaderMetrics, + logger log.Logger, + walDir string, + samplesIn *ewmaRate, + cfg config.QueueConfig, + mCfg config.MetadataConfig, + externalLabels labels.Labels, + relabelConfigs []*relabel.Config, + client WriteClient, + flushDeadline time.Duration, + interner *pool, + highestRecvTimestamp *maxTimestamp, + sm ReadyScrapeManager, + enableExemplarRemoteWrite bool, +) *QueueManager { + if logger == nil { + logger = log.NewNopLogger() + } + + logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) + t := &QueueManager{ + logger: logger, + flushDeadline: flushDeadline, + cfg: cfg, + mcfg: mCfg, + externalLabels: externalLabels, + relabelConfigs: relabelConfigs, + storeClient: client, + sendExemplars: enableExemplarRemoteWrite, + + seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels), + seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int), + droppedSeries: make(map[chunks.HeadSeriesRef]struct{}), + + numShards: cfg.MinShards, + reshardChan: make(chan int), + quit: make(chan struct{}), + + dataIn: samplesIn, + dataDropped: newEWMARate(ewmaWeight, shardUpdateDuration), + dataOut: newEWMARate(ewmaWeight, shardUpdateDuration), + dataOutDuration: newEWMARate(ewmaWeight, shardUpdateDuration), + + metrics: metrics, + interner: interner, + highestRecvTimestamp: highestRecvTimestamp, + } + + t.watcher = wal.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, walDir, enableExemplarRemoteWrite) + if t.mcfg.Send { + t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline) + } + t.shards = t.newShards() + + return t +} + +// AppendMetadata sends metadata the remote storage. Metadata is sent in batches, but is not parallelized. 
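+// Each batch holds at most mcfg.MaxSamplesPerSend entries; a batch that
+// still fails after retries is counted as failed and logged, not re-sent.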
+func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) { + mm := make([]prompb.MetricMetadata, 0, len(metadata)) + for _, entry := range metadata { + mm = append(mm, prompb.MetricMetadata{ + MetricFamilyName: entry.Metric, + Help: entry.Help, + Type: metricTypeToMetricTypeProto(entry.Type), + Unit: entry.Unit, + }) + } + + pBuf := proto.NewBuffer(nil) + numSends := int(math.Ceil(float64(len(metadata)) / float64(t.mcfg.MaxSamplesPerSend))) + for i := 0; i < numSends; i++ { + last := (i + 1) * t.mcfg.MaxSamplesPerSend + if last > len(metadata) { + last = len(metadata) + } + err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) + if err != nil { + t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) + level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) + } + } +} + +func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error { + // Build the WriteRequest with no samples. + req, _, err := buildWriteRequest(nil, metadata, pBuf, nil) + if err != nil { + return err + } + + metadataCount := len(metadata) + + attemptStore := func(try int) error { + span, ctx := opentracing.StartSpanFromContext(ctx, "Remote Metadata Send Batch") + defer span.Finish() + + span.SetTag("metadata", metadataCount) + span.SetTag("try", try) + span.SetTag("remote_name", t.storeClient.Name()) + span.SetTag("remote_url", t.storeClient.Endpoint()) + + begin := time.Now() + err := t.storeClient.Store(ctx, req) + t.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + + if err != nil { + span.LogKV("error", err) + ext.Error.Set(span, true) + return err + } + + return nil + } + + retry := func() { + t.metrics.retriedMetadataTotal.Add(float64(len(metadata))) + } + err = sendWriteRequestWithBackoff(ctx, t.cfg, t.logger, attemptStore, retry) + if err != nil { + return err + } + t.metrics.metadataTotal.Add(float64(len(metadata))) + t.metrics.metadataBytesTotal.Add(float64(len(req))) + return nil +} + +// Append queues a sample to be sent to the remote storage. Blocks until all samples are +// enqueued on their shards or a shutdown signal is received. +func (t *QueueManager) Append(samples []record.RefSample) bool { + var appendSample prompb.Sample +outer: + for _, s := range samples { + t.seriesMtx.Lock() + lbls, ok := t.seriesLabels[s.Ref] + if !ok { + t.metrics.droppedSamplesTotal.Inc() + t.dataDropped.incr(1) + if _, ok := t.droppedSeries[s.Ref]; !ok { + level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) + } + t.seriesMtx.Unlock() + continue + } + t.seriesMtx.Unlock() + // This will only loop if the queues are being resharded. 
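+ // Retry with exponential backoff, doubling up to cfg.MaxBackoff, until
+ // the sample is enqueued or shutdown is signalled via t.quit.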
+ backoff := t.cfg.MinBackoff + for { + select { + case <-t.quit: + return false + default: + } + appendSample.Value = s.V + appendSample.Timestamp = s.T + if t.shards.enqueue(s.Ref, writeSample{lbls, appendSample}) { + continue outer + } + + t.metrics.enqueueRetriesTotal.Inc() + time.Sleep(time.Duration(backoff)) + backoff = backoff * 2 + if backoff > t.cfg.MaxBackoff { + backoff = t.cfg.MaxBackoff + } + } + } + return true +} + +func (t *QueueManager) AppendExemplars(exemplars []record.RefExemplar) bool { + if !t.sendExemplars { + return true + } + + var appendExemplar prompb.Exemplar +outer: + for _, e := range exemplars { + t.seriesMtx.Lock() + lbls, ok := t.seriesLabels[e.Ref] + if !ok { + t.metrics.droppedExemplarsTotal.Inc() + // Track dropped exemplars in the same EWMA for sharding calc. + t.dataDropped.incr(1) + if _, ok := t.droppedSeries[e.Ref]; !ok { + level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + } + t.seriesMtx.Unlock() + continue + } + t.seriesMtx.Unlock() + // This will only loop if the queues are being resharded. + backoff := t.cfg.MinBackoff + for { + select { + case <-t.quit: + return false + default: + } + appendExemplar.Labels = labelsToLabelsProto(e.Labels, nil) + appendExemplar.Timestamp = e.T + appendExemplar.Value = e.V + if t.shards.enqueue(e.Ref, writeExemplar{lbls, appendExemplar}) { + continue outer + } + + t.metrics.enqueueRetriesTotal.Inc() + time.Sleep(time.Duration(backoff)) + backoff = backoff * 2 + if backoff > t.cfg.MaxBackoff { + backoff = t.cfg.MaxBackoff + } + } + } + return true +} + +// Start the queue manager sending samples to the remote storage. +// Does not block. +func (t *QueueManager) Start() { + // Register and initialise some metrics. + t.metrics.register() + t.metrics.shardCapacity.Set(float64(t.cfg.Capacity)) + t.metrics.maxNumShards.Set(float64(t.cfg.MaxShards)) + t.metrics.minNumShards.Set(float64(t.cfg.MinShards)) + t.metrics.desiredNumShards.Set(float64(t.cfg.MinShards)) + t.metrics.maxSamplesPerSend.Set(float64(t.cfg.MaxSamplesPerSend)) + + t.shards.start(t.numShards) + t.watcher.Start() + if t.mcfg.Send { + t.metadataWatcher.Start() + } + + t.wg.Add(2) + go t.updateShardsLoop() + go t.reshardLoop() +} + +// Stop stops sending samples to the remote storage and waits for pending +// sends to complete. +func (t *QueueManager) Stop() { + level.Info(t.logger).Log("msg", "Stopping remote storage...") + defer level.Info(t.logger).Log("msg", "Remote storage stopped.") + + close(t.quit) + t.wg.Wait() + // Wait for all QueueManager routines to end before stopping shards, metadata watcher, and WAL watcher. This + // is to ensure we don't end up executing a reshard and shards.stop() at the same time, which + // causes a closed channel panic. + t.shards.stop() + t.watcher.Stop() + if t.mcfg.Send { + t.metadataWatcher.Stop() + } + + // On shutdown, release the strings in the labels from the intern pool. + t.seriesMtx.Lock() + for _, labels := range t.seriesLabels { + t.releaseLabels(labels) + } + t.seriesMtx.Unlock() + t.metrics.unregister() +} + +// StoreSeries keeps track of which series we know about for lookups when sending samples to remote. +func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) { + t.seriesMtx.Lock() + defer t.seriesMtx.Unlock() + t.seriesSegmentMtx.Lock() + defer t.seriesSegmentMtx.Unlock() + for _, s := range series { + // Just make sure all the Refs of Series will insert into seriesSegmentIndexes map for tracking. 
+ t.seriesSegmentIndexes[s.Ref] = index + + ls := processExternalLabels(s.Labels, t.externalLabels) + lbls := relabel.Process(ls, t.relabelConfigs...) + if len(lbls) == 0 { + t.droppedSeries[s.Ref] = struct{}{} + continue + } + t.internLabels(lbls) + + // We should not ever be replacing a series labels in the map, but just + // in case we do we need to ensure we do not leak the replaced interned + // strings. + if orig, ok := t.seriesLabels[s.Ref]; ok { + t.releaseLabels(orig) + } + t.seriesLabels[s.Ref] = lbls + } +} + +// UpdateSeriesSegment updates the segment number held against the series, +// so we can trim older ones in SeriesReset. +func (t *QueueManager) UpdateSeriesSegment(series []record.RefSeries, index int) { + t.seriesSegmentMtx.Lock() + defer t.seriesSegmentMtx.Unlock() + for _, s := range series { + t.seriesSegmentIndexes[s.Ref] = index + } +} + +// SeriesReset is used when reading a checkpoint. WAL Watcher should have +// stored series records with the checkpoints index number, so we can now +// delete any ref ID's lower than that # from the two maps. +func (t *QueueManager) SeriesReset(index int) { + t.seriesMtx.Lock() + defer t.seriesMtx.Unlock() + t.seriesSegmentMtx.Lock() + defer t.seriesSegmentMtx.Unlock() + // Check for series that are in segments older than the checkpoint + // that were not also present in the checkpoint. + for k, v := range t.seriesSegmentIndexes { + if v < index { + delete(t.seriesSegmentIndexes, k) + t.releaseLabels(t.seriesLabels[k]) + delete(t.seriesLabels, k) + delete(t.droppedSeries, k) + } + } +} + +// SetClient updates the client used by a queue. Used when only client specific +// fields are updated to avoid restarting the queue. +func (t *QueueManager) SetClient(c WriteClient) { + t.clientMtx.Lock() + t.storeClient = c + t.clientMtx.Unlock() +} + +func (t *QueueManager) client() WriteClient { + t.clientMtx.RLock() + defer t.clientMtx.RUnlock() + return t.storeClient +} + +func (t *QueueManager) internLabels(lbls labels.Labels) { + for i, l := range lbls { + lbls[i].Name = t.interner.intern(l.Name) + lbls[i].Value = t.interner.intern(l.Value) + } +} + +func (t *QueueManager) releaseLabels(ls labels.Labels) { + for _, l := range ls { + t.interner.release(l.Name) + t.interner.release(l.Value) + } +} + +// processExternalLabels merges externalLabels into ls. If ls contains +// a label in externalLabels, the value in ls wins. +func processExternalLabels(ls, externalLabels labels.Labels) labels.Labels { + i, j, result := 0, 0, make(labels.Labels, 0, len(ls)+len(externalLabels)) + for i < len(ls) && j < len(externalLabels) { + if ls[i].Name < externalLabels[j].Name { + result = append(result, labels.Label{ + Name: ls[i].Name, + Value: ls[i].Value, + }) + i++ + } else if ls[i].Name > externalLabels[j].Name { + result = append(result, externalLabels[j]) + j++ + } else { + result = append(result, labels.Label{ + Name: ls[i].Name, + Value: ls[i].Value, + }) + i++ + j++ + } + } + + return append(append(result, ls[i:]...), externalLabels[j:]...) +} + +func (t *QueueManager) updateShardsLoop() { + defer t.wg.Done() + + ticker := time.NewTicker(shardUpdateDuration) + defer ticker.Stop() + for { + select { + case <-ticker.C: + desiredShards := t.calculateDesiredShards() + if !t.shouldReshard(desiredShards) { + continue + } + // Resharding can take some time, and we want this loop + // to stay close to shardUpdateDuration. 
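+ // A non-blocking send: if a reshard is already in flight, this
+ // cycle's desired shard count is simply skipped.
+ // As an illustration (numbers are hypothetical, not from the source):
+ // with dataInRate = 80000 samples/s, dataKeptRatio = 1.0 and
+ // timePerSample = 25µs, calculateDesiredShards asks for roughly
+ // 0.000025 * 80000 = 2 shards.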
+ select { + case t.reshardChan <- desiredShards: + level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", desiredShards) + t.numShards = desiredShards + default: + level.Info(t.logger).Log("msg", "Currently resharding, skipping.") + } + case <-t.quit: + return + } + } +} + +// shouldReshard returns if resharding should occur +func (t *QueueManager) shouldReshard(desiredShards int) bool { + if desiredShards == t.numShards { + return false + } + // We shouldn't reshard if Prometheus hasn't been able to send to the + // remote endpoint successfully within some period of time. + minSendTimestamp := time.Now().Add(-2 * time.Duration(t.cfg.BatchSendDeadline)).Unix() + lsts := t.lastSendTimestamp.Load() + if lsts < minSendTimestamp { + level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) + return false + } + return true +} + +// calculateDesiredShards returns the number of desired shards, which will be +// the current QueueManager.numShards if resharding should not occur for reasons +// outlined in this functions implementation. It is up to the caller to reshard, or not, +// based on the return value. +func (t *QueueManager) calculateDesiredShards() int { + t.dataOut.tick() + t.dataDropped.tick() + t.dataOutDuration.tick() + + // We use the number of incoming samples as a prediction of how much work we + // will need to do next iteration. We add to this any pending samples + // (received - send) so we can catch up with any backlog. We use the average + // outgoing batch latency to work out how many shards we need. + var ( + dataInRate = t.dataIn.rate() + dataOutRate = t.dataOut.rate() + dataKeptRatio = dataOutRate / (t.dataDropped.rate() + dataOutRate) + dataOutDuration = t.dataOutDuration.rate() / float64(time.Second) + dataPendingRate = dataInRate*dataKeptRatio - dataOutRate + highestSent = t.metrics.highestSentTimestamp.Get() + highestRecv = t.highestRecvTimestamp.Get() + delay = highestRecv - highestSent + dataPending = delay * dataInRate * dataKeptRatio + ) + + if dataOutRate <= 0 { + return t.numShards + } + + // When behind we will try to catch up on a proporation of samples per tick. + // This works similarly to an integral accumulator in that pending samples + // is the result of the error integral. + const integralGain = 0.1 / float64(shardUpdateDuration/time.Second) + + var ( + timePerSample = dataOutDuration / dataOutRate + desiredShards = timePerSample * (dataInRate*dataKeptRatio + integralGain*dataPending) + ) + t.metrics.desiredNumShards.Set(desiredShards) + level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", + "dataInRate", dataInRate, + "dataOutRate", dataOutRate, + "dataKeptRatio", dataKeptRatio, + "dataPendingRate", dataPendingRate, + "dataPending", dataPending, + "dataOutDuration", dataOutDuration, + "timePerSample", timePerSample, + "desiredShards", desiredShards, + "highestSent", highestSent, + "highestRecv", highestRecv, + ) + + // Changes in the number of shards must be greater than shardToleranceFraction. + var ( + lowerBound = float64(t.numShards) * (1. - shardToleranceFraction) + upperBound = float64(t.numShards) * (1. 
+ shardToleranceFraction)
+ )
+ level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop",
+ "lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound)
+ if lowerBound <= desiredShards && desiredShards <= upperBound {
+ return t.numShards
+ }
+
+ numShards := int(math.Ceil(desiredShards))
+ // Do not downshard if we are more than ten seconds back.
+ if numShards < t.numShards && delay > 10.0 {
+ level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind")
+ return t.numShards
+ }
+
+ if numShards > t.cfg.MaxShards {
+ numShards = t.cfg.MaxShards
+ } else if numShards < t.cfg.MinShards {
+ numShards = t.cfg.MinShards
+ }
+ return numShards
+}
+
+func (t *QueueManager) reshardLoop() {
+ defer t.wg.Done()
+
+ for {
+ select {
+ case numShards := <-t.reshardChan:
+ // We start the newShards after we have stopped (and therefore completely
+ // flushed) the oldShards, to guarantee we only ever deliver samples in
+ // order.
+ t.shards.stop()
+ t.shards.start(numShards)
+ case <-t.quit:
+ return
+ }
+ }
+}
+
+func (t *QueueManager) newShards() *shards {
+ s := &shards{
+ qm: t,
+ done: make(chan struct{}),
+ }
+ return s
+}
+
+type writeSample struct {
+ seriesLabels labels.Labels
+ sample prompb.Sample
+}
+
+type writeExemplar struct {
+ seriesLabels labels.Labels
+ exemplar prompb.Exemplar
+}
+
+type shards struct {
+ mtx sync.RWMutex // With the WAL, this is never actually contended.
+
+ qm *QueueManager
+ queues []chan interface{}
+ // So we can accurately track how many of each are lost during shard shutdowns.
+ enqueuedSamples atomic.Int64
+ enqueuedExemplars atomic.Int64
+
+ // Emulate a wait group with a channel and an atomic int, as you
+ // cannot select on a wait group.
+ done chan struct{}
+ running atomic.Int32
+
+ // Soft shutdown context will prevent new enqueues and deadlocks.
+ softShutdown chan struct{}
+
+ // Hard shutdown context is used to terminate outgoing HTTP connections
+ // after giving them a chance to terminate.
+ hardShutdown context.CancelFunc
+ samplesDroppedOnHardShutdown atomic.Uint32
+ exemplarsDroppedOnHardShutdown atomic.Uint32
+}
+
+// start the shards; must be called before any call to enqueue.
+func (s *shards) start(n int) {
+ s.mtx.Lock()
+ defer s.mtx.Unlock()
+
+ s.qm.metrics.pendingSamples.Set(0)
+ s.qm.metrics.numShards.Set(float64(n))
+
+ newQueues := make([]chan interface{}, n)
+ for i := 0; i < n; i++ {
+ newQueues[i] = make(chan interface{}, s.qm.cfg.Capacity)
+ }
+
+ s.queues = newQueues
+
+ var hardShutdownCtx context.Context
+ hardShutdownCtx, s.hardShutdown = context.WithCancel(context.Background())
+ s.softShutdown = make(chan struct{})
+ s.running.Store(int32(n))
+ s.done = make(chan struct{})
+ s.samplesDroppedOnHardShutdown.Store(0)
+ s.exemplarsDroppedOnHardShutdown.Store(0)
+ for i := 0; i < n; i++ {
+ go s.runShard(hardShutdownCtx, i, newQueues[i])
+ }
+}
+
+// stop the shards; subsequent calls to enqueue will return false.
+func (s *shards) stop() {
+ // Attempt a clean shutdown, but only wait flushDeadline for all the shards
+ // to cleanly exit. As we're doing RPCs, enqueue can block indefinitely.
+ // We must be able to call stop concurrently, hence we can only take the
+ // RLock here.
+ s.mtx.RLock()
+ close(s.softShutdown)
+ s.mtx.RUnlock()
+
+ // Enqueue should now be unblocked, so we can take the write lock. This
+ // also ensures we don't race with writes to the queues, and get a panic:
+ // send on closed channel.
+ s.mtx.Lock() + defer s.mtx.Unlock() + for _, queue := range s.queues { + close(queue) + } + select { + case <-s.done: + return + case <-time.After(s.qm.flushDeadline): + } + + // Force an unclean shutdown. + s.hardShutdown() + <-s.done + if dropped := s.samplesDroppedOnHardShutdown.Load(); dropped > 0 { + level.Error(s.qm.logger).Log("msg", "Failed to flush all samples on shutdown", "count", dropped) + } + if dropped := s.exemplarsDroppedOnHardShutdown.Load(); dropped > 0 { + level.Error(s.qm.logger).Log("msg", "Failed to flush all exemplars on shutdown", "count", dropped) + } +} + +// enqueue data (sample or exemplar). If we are currently in the process of shutting down or resharding, +// enqueue will return false; in this case, you should back off and retry. +func (s *shards) enqueue(ref chunks.HeadSeriesRef, data interface{}) bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + select { + case <-s.softShutdown: + return false + default: + } + + shard := uint64(ref) % uint64(len(s.queues)) + select { + case <-s.softShutdown: + return false + case s.queues[shard] <- data: + switch data.(type) { + case writeSample: + s.qm.metrics.pendingSamples.Inc() + s.enqueuedSamples.Inc() + case writeExemplar: + s.qm.metrics.pendingExemplars.Inc() + s.enqueuedExemplars.Inc() + default: + level.Warn(s.qm.logger).Log("msg", "Invalid object type in shards enqueue") + } + return true + } +} + +func (s *shards) runShard(ctx context.Context, shardID int, queue chan interface{}) { + defer func() { + if s.running.Dec() == 0 { + close(s.done) + } + }() + + shardNum := strconv.Itoa(shardID) + + // Send batches of at most MaxSamplesPerSend samples to the remote storage. + // If we have fewer samples than that, flush them out after a deadline anyway. + var ( + max = s.qm.cfg.MaxSamplesPerSend + nPending, nPendingSamples, nPendingExemplars = 0, 0, 0 + + pBuf = proto.NewBuffer(nil) + buf []byte + ) + if s.qm.sendExemplars { + max += int(float64(max) * 0.1) + } + + pendingData := make([]prompb.TimeSeries, max) + for i := range pendingData { + pendingData[i].Samples = []prompb.Sample{{}} + if s.qm.sendExemplars { + pendingData[i].Exemplars = []prompb.Exemplar{{}} + } + } + + timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline)) + stop := func() { + if !timer.Stop() { + select { + case <-timer.C: + default: + } + } + } + defer stop() + + for { + select { + case <-ctx.Done(): + // In this case we drop all samples in the buffer and the queue. + // Remove them from pending and mark them as failed.
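+ // Everything still buffered in this batch, plus whatever the enqueued + // counters still account for in the queue, is written off below: the pending + // gauges are decremented and the failed/dropped counters incremented so the + // accounting stays consistent across a hard shutdown.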
+ droppedSamples := nPendingSamples + int(s.enqueuedSamples.Load()) + droppedExemplars := nPendingExemplars + int(s.enqueuedExemplars.Load()) + s.qm.metrics.pendingSamples.Sub(float64(droppedSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(droppedExemplars)) + s.qm.metrics.failedSamplesTotal.Add(float64(droppedSamples)) + s.qm.metrics.failedExemplarsTotal.Add(float64(droppedExemplars)) + s.samplesDroppedOnHardShutdown.Add(uint32(droppedSamples)) + s.exemplarsDroppedOnHardShutdown.Add(uint32(droppedExemplars)) + return + + case sample, ok := <-queue: + if !ok { + if nPendingSamples > 0 || nPendingExemplars > 0 { + level.Debug(s.qm.logger).Log("msg", "Flushing data to remote storage...", "samples", nPendingSamples, "exemplars", nPendingExemplars) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) + s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) + level.Debug(s.qm.logger).Log("msg", "Done flushing.") + } + return + } + + pendingData[nPending].Samples = pendingData[nPending].Samples[:0] + if s.qm.sendExemplars { + pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0] + } + // Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff) + // retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll + // stop reading from the queue. This makes it safe to reference pendingSamples by index. + switch d := sample.(type) { + case writeSample: + pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Samples = append(pendingData[nPending].Samples, d.sample) + nPendingSamples++ + nPending++ + + case writeExemplar: + pendingData[nPending].Labels = labelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels) + pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, d.exemplar) + nPendingExemplars++ + nPending++ + } + + if nPending >= max { + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) + s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) + nPendingSamples = 0 + nPendingExemplars = 0 + nPending = 0 + + stop() + timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) + } + + case <-timer.C: + if nPendingSamples > 0 || nPendingExemplars > 0 { + level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum) + s.sendSamples(ctx, pendingData[:nPending], nPendingSamples, nPendingExemplars, pBuf, &buf) + s.qm.metrics.pendingSamples.Sub(float64(nPendingSamples)) + s.qm.metrics.pendingExemplars.Sub(float64(nPendingExemplars)) + nPendingSamples = 0 + nPendingExemplars = 0 + nPending = 0 + } + timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline)) + } + } +} + +func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) { + begin := time.Now() + err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, pBuf, buf) + if err != nil { + level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "err", err) + s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount)) + s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarCount)) + } + + // These counters are used to 
calculate the dynamic sharding, and as such + // should be maintained irrespective of success or failure. + s.qm.dataOut.incr(int64(len(samples))) + s.qm.dataOutDuration.incr(int64(time.Since(begin))) + s.qm.lastSendTimestamp.Store(time.Now().Unix()) +} + +// sendSamples to the remote storage with backoff for recoverable errors. +func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount int, pBuf *proto.Buffer, buf *[]byte) error { + // Build the WriteRequest with no metadata. + req, highest, err := buildWriteRequest(samples, nil, pBuf, *buf) + if err != nil { + // Failing to build the write request is non-recoverable, since it will + // only error if marshaling the proto to bytes fails. + return err + } + + reqSize := len(req) + *buf = req + + // An anonymous function allows us to defer the completion of our per-try spans + // without causing a memory leak, and it has the nice effect of not propagating any + // parameters for sendSamplesWithBackoff/3. + attemptStore := func(try int) error { + span, ctx := opentracing.StartSpanFromContext(ctx, "Remote Send Batch") + defer span.Finish() + + span.SetTag("samples", sampleCount) + if exemplarCount > 0 { + span.SetTag("exemplars", exemplarCount) + } + span.SetTag("request_size", reqSize) + span.SetTag("try", try) + span.SetTag("remote_name", s.qm.storeClient.Name()) + span.SetTag("remote_url", s.qm.storeClient.Endpoint()) + + begin := time.Now() + s.qm.metrics.samplesTotal.Add(float64(sampleCount)) + s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount)) + err := s.qm.client().Store(ctx, *buf) + s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds()) + + if err != nil { + span.LogKV("error", err) + ext.Error.Set(span, true) + return err + } + + return nil + } + + onRetry := func() { + s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount)) + s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount)) + } + + err = sendWriteRequestWithBackoff(ctx, s.qm.cfg, s.qm.logger, attemptStore, onRetry) + if err != nil { + return err + } + s.qm.metrics.sentBytesTotal.Add(float64(reqSize)) + s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000)) + return nil +} + +func sendWriteRequestWithBackoff(ctx context.Context, cfg config.QueueConfig, l log.Logger, attempt func(int) error, onRetry func()) error { + backoff := cfg.MinBackoff + sleepDuration := model.Duration(0) + try := 0 + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := attempt(try) + + if err == nil { + return nil + } + + // If the error is unrecoverable, we should not retry. + backoffErr, ok := err.(RecoverableError) + if !ok { + return err + } + + sleepDuration = backoff + if backoffErr.retryAfter > 0 { + sleepDuration = backoffErr.retryAfter + level.Info(l).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration) + } else if backoffErr.retryAfter < 0 { + level.Debug(l).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism") + } + + select { + case <-ctx.Done(): + case <-time.After(time.Duration(sleepDuration)): + } + + // If we make it this far, we've encountered a recoverable error and will retry. 
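+ // To make the schedule concrete: with MinBackoff = 30ms and MaxBackoff = 5s + // (illustrative values; the real ones come from the queue config), the sleep + // durations are 30ms, 60ms, 120ms, ... capped at 5s. A server-supplied + // Retry-After both overrides the current sleep and becomes the base that is + // doubled for the next attempt.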
+ onRetry() + level.Warn(l).Log("msg", "Failed to send batch, retrying", "err", err) + + backoff = sleepDuration * 2 + + if backoff > cfg.MaxBackoff { + backoff = cfg.MaxBackoff + } + + try++ + } +} + +func buildWriteRequest(samples []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte) ([]byte, int64, error) { + var highest int64 + for _, ts := range samples { + // At the moment we only ever append a TimeSeries with a single sample or exemplar in it. + if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest { + highest = ts.Samples[0].Timestamp + } + if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest { + highest = ts.Exemplars[0].Timestamp + } + } + + req := &prompb.WriteRequest{ + Timeseries: samples, + Metadata: metadata, + } + + if pBuf == nil { + pBuf = proto.NewBuffer(nil) // For convenience in tests. Not efficient. + } else { + pBuf.Reset() + } + err := pBuf.Marshal(req) + if err != nil { + return nil, highest, err + } + + // snappy uses len() to see if it needs to allocate a new slice. Make the + // buffer as long as possible. + if buf != nil { + buf = buf[0:cap(buf)] + } + compressed := snappy.Encode(buf, pBuf.Bytes()) + return compressed, highest, nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read.go b/vendor/github.com/prometheus/prometheus/storage/remote/read.go new file mode 100644 index 00000000000..07176359379 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" +) + +type sampleAndChunkQueryableClient struct { + client ReadClient + externalLabels labels.Labels + requiredMatchers []*labels.Matcher + readRecent bool + callback startTimeCallback +} + +// NewSampleAndChunkQueryableClient returns a storage.SampleAndChunkQueryable which queries the given client to select series sets. 
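+// +// A minimal usage sketch (identifiers are illustrative, not part of this package): +// +// queryable := NewSampleAndChunkQueryableClient(readClient, extLabels, nil, true, localStartTime) +// querier, err := queryable.Querier(ctx, mint, maxt) +// +// With readRecent set the querier always reaches out to the remote endpoint; +// otherwise maxt is clamped so that data already covered by the local TSDB is +// not read again.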
+func NewSampleAndChunkQueryableClient( + c ReadClient, + externalLabels labels.Labels, + requiredMatchers []*labels.Matcher, + readRecent bool, + callback startTimeCallback, +) storage.SampleAndChunkQueryable { + return &sampleAndChunkQueryableClient{ + client: c, + + externalLabels: externalLabels, + requiredMatchers: requiredMatchers, + readRecent: readRecent, + callback: callback, + } +} + +func (c *sampleAndChunkQueryableClient) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + q := &querier{ + ctx: ctx, + mint: mint, + maxt: maxt, + client: c.client, + externalLabels: c.externalLabels, + requiredMatchers: c.requiredMatchers, + } + if c.readRecent { + return q, nil + } + + var ( + noop bool + err error + ) + q.maxt, noop, err = c.preferLocalStorage(mint, maxt) + if err != nil { + return nil, err + } + if noop { + return storage.NoopQuerier(), nil + } + return q, nil +} + +func (c *sampleAndChunkQueryableClient) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + cq := &chunkQuerier{ + querier: querier{ + ctx: ctx, + mint: mint, + maxt: maxt, + client: c.client, + externalLabels: c.externalLabels, + requiredMatchers: c.requiredMatchers, + }, + } + if c.readRecent { + return cq, nil + } + + var ( + noop bool + err error + ) + cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt) + if err != nil { + return nil, err + } + if noop { + return storage.NoopChunkedQuerier(), nil + } + return cq, nil +} + +// preferLocalStorage returns noop if requested timeframe can be answered completely by the local TSDB, and +// reduces maxt if the timeframe can be partially answered by TSDB. +func (c *sampleAndChunkQueryableClient) preferLocalStorage(mint, maxt int64) (cmaxt int64, noop bool, err error) { + localStartTime, err := c.callback() + if err != nil { + return 0, false, err + } + cmaxt = maxt + + // Avoid queries whose time range is later than the first timestamp in local DB. + if mint > localStartTime { + return 0, true, nil + } + // Query only samples older than the first timestamp in local DB. + if maxt > localStartTime { + cmaxt = localStartTime + } + return cmaxt, false, nil +} + +type querier struct { + ctx context.Context + mint, maxt int64 + client ReadClient + + // Derived from configuration. + externalLabels labels.Labels + requiredMatchers []*labels.Matcher +} + +// Select implements storage.Querier and uses the given matchers to read series sets from the client. +// Select also adds equality matchers for all external labels to the list of matchers before calling remote endpoint. +// The added external labels are removed from the returned series sets. +// +// If requiredMatchers are given, select returns a NoopSeriesSet if the given matchers don't match the label set of the +// requiredMatchers. Otherwise it'll just call remote endpoint. +func (q *querier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + if len(q.requiredMatchers) > 0 { + // Copy to not modify slice configured by user. + requiredMatchers := append([]*labels.Matcher{}, q.requiredMatchers...) + for _, m := range matchers { + for i, r := range requiredMatchers { + if m.Type == labels.MatchEqual && m.Name == r.Name && m.Value == r.Value { + // Requirement matched. + requiredMatchers = append(requiredMatchers[:i], requiredMatchers[i+1:]...) 
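+ // Each requirement can be satisfied at most once, so it is removed from the + // working copy as soon as it matches; any requirement left over after the + // scan means this remote endpoint is not responsible for the query and a + // NoopSeriesSet is returned below.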
+ break + } + } + if len(requiredMatchers) == 0 { + break + } + } + if len(requiredMatchers) > 0 { + return storage.NoopSeriesSet() + } + } + + m, added := q.addExternalLabels(matchers) + query, err := ToQuery(q.mint, q.maxt, m, hints) + if err != nil { + return storage.ErrSeriesSet(errors.Wrap(err, "toQuery")) + } + + res, err := q.client.Read(q.ctx, query) + if err != nil { + return storage.ErrSeriesSet(errors.Wrap(err, "remote_read")) + } + return newSeriesSetFilter(FromQueryResult(sortSeries, res), added) +} + +// addExternalLabels adds matchers for each external label. External labels +// that already have a corresponding user-supplied matcher are skipped, as we +// assume that the user explicitly wants to select a different value for them. +// We return the new set of matchers, along with a map of labels for which +// matchers were added, so that these can later be removed from the result +// time series again. +func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, labels.Labels) { + el := make(labels.Labels, len(q.externalLabels)) + copy(el, q.externalLabels) + + // ms won't be sorted, so have to O(n^2) the search. + for _, m := range ms { + for i := 0; i < len(el); { + if el[i].Name == m.Name { + el = el[:i+copy(el[i:], el[i+1:])] + continue + } + i++ + } + } + + for _, l := range el { + m, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value) + if err != nil { + panic(err) + } + ms = append(ms, m) + } + return ms, el +} + +// LabelValues implements storage.Querier and is a noop. +func (q *querier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) { + // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 + return nil, nil, errors.New("not implemented") +} + +// LabelNames implements storage.Querier and is a noop. +func (q *querier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) { + // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 + return nil, nil, errors.New("not implemented") +} + +// Close implements storage.Querier and is a noop. +func (q *querier) Close() error { + return nil +} + +// chunkQuerier is an adapter to make a client usable as a storage.ChunkQuerier. +type chunkQuerier struct { + querier +} + +// Select implements storage.ChunkQuerier and uses the given matchers to read chunk series sets from the client. +// It uses remote.querier.Select so it supports external labels and required matchers if specified. +func (q *chunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { + // TODO(bwplotka) Support remote read chunked and allow returning chunks directly (TODO ticket). 
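+ // Until then, the sample-based series returned by the inner querier are + // re-encoded into chunk series here.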
+ return storage.NewSeriesSetToChunkSet(q.querier.Select(sortSeries, hints, matchers...)) +} + +func newSeriesSetFilter(ss storage.SeriesSet, toFilter labels.Labels) storage.SeriesSet { + return &seriesSetFilter{ + SeriesSet: ss, + toFilter: toFilter, + } +} + +type seriesSetFilter struct { + storage.SeriesSet + toFilter labels.Labels + querier storage.Querier +} + +func (ssf *seriesSetFilter) GetQuerier() storage.Querier { + return ssf.querier +} + +func (ssf *seriesSetFilter) SetQuerier(querier storage.Querier) { + ssf.querier = querier +} + +func (ssf seriesSetFilter) At() storage.Series { + return seriesFilter{ + Series: ssf.SeriesSet.At(), + toFilter: ssf.toFilter, + } +} + +type seriesFilter struct { + storage.Series + toFilter labels.Labels +} + +func (sf seriesFilter) Labels() labels.Labels { + labels := sf.Series.Labels() + for i, j := 0, 0; i < len(labels) && j < len(sf.toFilter); { + if labels[i].Name < sf.toFilter[j].Name { + i++ + } else if labels[i].Name > sf.toFilter[j].Name { + j++ + } else { + labels = labels[:i+copy(labels[i:], labels[i+1:])] + j++ + } + } + return labels +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go new file mode 100644 index 00000000000..e1f1df21c19 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -0,0 +1,272 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "net/http" + "sort" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/gate" +) + +type readHandler struct { + logger log.Logger + queryable storage.SampleAndChunkQueryable + config func() config.Config + remoteReadSampleLimit int + remoteReadMaxBytesInFrame int + remoteReadGate *gate.Gate + queries prometheus.Gauge +} + +// NewReadHandler creates a http.Handler that accepts remote read requests and +// writes them to the provided queryable. +func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { + h := &readHandler{ + logger: logger, + queryable: queryable, + config: config, + remoteReadSampleLimit: remoteReadSampleLimit, + remoteReadGate: gate.New(remoteReadConcurrencyLimit), + remoteReadMaxBytesInFrame: remoteReadMaxBytesInFrame, + + queries: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "prometheus", + Subsystem: "api", // TODO: changes to storage in Prometheus 3.0. 
+ Name: "remote_read_queries", + Help: "The current number of remote read queries being executed or waiting.", + }), + } + if r != nil { + r.MustRegister(h.queries) + } + return h +} + +func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if err := h.remoteReadGate.Start(ctx); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + h.queries.Inc() + + defer h.remoteReadGate.Done() + defer h.queries.Dec() + + req, err := DecodeReadRequest(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + externalLabels := h.config().GlobalConfig.ExternalLabels.Map() + + sortedExternalLabels := make([]prompb.Label, 0, len(externalLabels)) + for name, value := range externalLabels { + sortedExternalLabels = append(sortedExternalLabels, prompb.Label{ + Name: name, + Value: value, + }) + } + sort.Slice(sortedExternalLabels, func(i, j int) bool { + return sortedExternalLabels[i].Name < sortedExternalLabels[j].Name + }) + + responseType, err := NegotiateResponseType(req.AcceptedResponseTypes) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + switch responseType { + case prompb.ReadRequest_STREAMED_XOR_CHUNKS: + h.remoteReadStreamedXORChunks(ctx, w, req, externalLabels, sortedExternalLabels) + default: + // On empty or unknown types in req.AcceptedResponseTypes we default to non streamed, raw samples response. + h.remoteReadSamples(ctx, w, req, externalLabels, sortedExternalLabels) + } +} + +func (h *readHandler) remoteReadSamples( + ctx context.Context, + w http.ResponseWriter, + req *prompb.ReadRequest, + externalLabels map[string]string, + sortedExternalLabels []prompb.Label, +) { + w.Header().Set("Content-Type", "application/x-protobuf") + w.Header().Set("Content-Encoding", "snappy") + + resp := prompb.ReadResponse{ + Results: make([]*prompb.QueryResult, len(req.Queries)), + } + for i, query := range req.Queries { + if err := func() error { + filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels) + if err != nil { + return err + } + + querier, err := h.queryable.Querier(ctx, query.StartTimestampMs, query.EndTimestampMs) + if err != nil { + return err + } + defer func() { + if err := querier.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error on querier close", "err", err.Error()) + } + }() + + var hints *storage.SelectHints + if query.Hints != nil { + hints = &storage.SelectHints{ + Start: query.Hints.StartMs, + End: query.Hints.EndMs, + Step: query.Hints.StepMs, + Func: query.Hints.Func, + Grouping: query.Hints.Grouping, + Range: query.Hints.RangeMs, + By: query.Hints.By, + } + } + + var ws storage.Warnings + resp.Results[i], ws, err = ToQueryResult(querier.Select(false, hints, filteredMatchers...), h.remoteReadSampleLimit) + if err != nil { + return err + } + for _, w := range ws { + level.Warn(h.logger).Log("msg", "Warnings on remote read query", "err", w.Error()) + } + for _, ts := range resp.Results[i].Timeseries { + ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels) + } + return nil + }(); err != nil { + if httpErr, ok := err.(HTTPError); ok { + http.Error(w, httpErr.Error(), httpErr.Status()) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + if err := EncodeReadResponse(&resp, w); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w 
http.ResponseWriter, req *prompb.ReadRequest, externalLabels map[string]string, sortedExternalLabels []prompb.Label) { + w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "internal http.ResponseWriter does not implement http.Flusher interface", http.StatusInternalServerError) + return + } + + for i, query := range req.Queries { + if err := func() error { + filteredMatchers, err := filterExtLabelsFromMatchers(query.Matchers, externalLabels) + if err != nil { + return err + } + + querier, err := h.queryable.ChunkQuerier(ctx, query.StartTimestampMs, query.EndTimestampMs) + if err != nil { + return err + } + defer func() { + if err := querier.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + } + }() + + var hints *storage.SelectHints + if query.Hints != nil { + hints = &storage.SelectHints{ + Start: query.Hints.StartMs, + End: query.Hints.EndMs, + Step: query.Hints.StepMs, + Func: query.Hints.Func, + Grouping: query.Hints.Grouping, + Range: query.Hints.RangeMs, + By: query.Hints.By, + } + } + + ws, err := StreamChunkedReadResponses( + NewChunkedWriter(w, f), + int64(i), + // The streaming API has to provide the series sorted. + querier.Select(true, hints, filteredMatchers...), + sortedExternalLabels, + h.remoteReadMaxBytesInFrame, + ) + if err != nil { + return err + } + + for _, w := range ws { + level.Warn(h.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error()) + } + return nil + }(); err != nil { + if httpErr, ok := err.(HTTPError); ok { + http.Error(w, httpErr.Error(), httpErr.Status()) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +} + +// filterExtLabelsFromMatchers changes equality matchers which match external labels +// to a matcher that looks for an empty label, +// as that label should not be present in the storage. +func filterExtLabelsFromMatchers(pbMatchers []*prompb.LabelMatcher, externalLabels map[string]string) ([]*labels.Matcher, error) { + matchers, err := FromLabelMatchers(pbMatchers) + if err != nil { + return nil, err + } + + filteredMatchers := make([]*labels.Matcher, 0, len(matchers)) + for _, m := range matchers { + value := externalLabels[m.Name] + if m.Type == labels.MatchEqual && value == m.Value { + matcher, err := labels.NewMatcher(labels.MatchEqual, m.Name, "") + if err != nil { + return nil, err + } + filteredMatchers = append(filteredMatchers, matcher) + } else { + filteredMatchers = append(filteredMatchers, m) + } + } + + return filteredMatchers, nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go new file mode 100644 index 00000000000..c82db1f9dff --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -0,0 +1,216 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "crypto/md5" + "encoding/hex" + "fmt" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" +) + +// String constants for instrumentation. +const ( + namespace = "prometheus" + subsystem = "remote_storage" + remoteName = "remote_name" + endpoint = "url" +) + +type ReadyScrapeManager interface { + Get() (*scrape.Manager, error) +} + +// startTimeCallback is a callback func that returns the oldest timestamp stored in a storage. +type startTimeCallback func() (int64, error) + +// Storage represents all the remote read and write endpoints. It implements +// storage.Storage. +type Storage struct { + logger *logging.Deduper + mtx sync.Mutex + + rws *WriteStorage + + // For reads. + queryables []storage.SampleAndChunkQueryable + localStartTimeCallback startTimeCallback +} + +// NewStorage returns a remote.Storage. +func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage { + if l == nil { + l = log.NewNopLogger() + } + logger := logging.Dedupe(l, 1*time.Minute) + + s := &Storage{ + logger: logger, + localStartTimeCallback: stCallback, + } + s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm) + return s +} + +// ApplyConfig updates the state as the new config requires. +func (s *Storage) ApplyConfig(conf *config.Config) error { + s.mtx.Lock() + defer s.mtx.Unlock() + + if err := s.rws.ApplyConfig(conf); err != nil { + return err + } + + // Update read clients + readHashes := make(map[string]struct{}) + queryables := make([]storage.SampleAndChunkQueryable, 0, len(conf.RemoteReadConfigs)) + for _, rrConf := range conf.RemoteReadConfigs { + hash, err := toHash(rrConf) + if err != nil { + return err + } + + // Don't allow duplicate remote read configs. + if _, ok := readHashes[hash]; ok { + return fmt.Errorf("duplicate remote read configs are not allowed, found duplicate for URL: %s", rrConf.URL) + } + readHashes[hash] = struct{}{} + + // Set the queue name to the config hash if the user has not set + // a name in their remote read config so we can still differentiate + // between queues that have the same remote read endpoint. + name := hash[:6] + if rrConf.Name != "" { + name = rrConf.Name + } + + c, err := NewReadClient(name, &ClientConfig{ + URL: rrConf.URL, + Timeout: rrConf.RemoteTimeout, + HTTPClientConfig: rrConf.HTTPClientConfig, + Headers: rrConf.Headers, + }) + if err != nil { + return err + } + + queryables = append(queryables, NewSampleAndChunkQueryableClient( + c, + conf.GlobalConfig.ExternalLabels, + labelsToEqualityMatchers(rrConf.RequiredMatchers), + rrConf.ReadRecent, + s.localStartTimeCallback, + )) + } + s.queryables = queryables + + return nil +} + +// StartTime implements the Storage interface. +func (s *Storage) StartTime() (int64, error) { + return int64(model.Latest), nil +} + +// Querier returns a storage.MergeQuerier combining the remote client queriers +// of each configured remote read endpoint.
+// Returned querier will never return error as all queryables are assumed best effort. +// Additionally all returned queriers ensure that its Select's SeriesSets have ready data after first `Next` invoke. +// This is because Prometheus (fanout and secondary queries) can't handle the stream failing half way through by design. +func (s *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + s.mtx.Lock() + queryables := s.queryables + s.mtx.Unlock() + + queriers := make([]storage.Querier, 0, len(queryables)) + for _, queryable := range queryables { + q, err := queryable.Querier(ctx, mint, maxt) + if err != nil { + return nil, err + } + queriers = append(queriers, q) + } + return storage.NewMergeQuerier(nil, queriers, storage.ChainedSeriesMerge), nil +} + +// ChunkQuerier returns a storage.MergeQuerier combining the remote client queriers +// of each configured remote read endpoint. +func (s *Storage) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + s.mtx.Lock() + queryables := s.queryables + s.mtx.Unlock() + + queriers := make([]storage.ChunkQuerier, 0, len(queryables)) + for _, queryable := range queryables { + q, err := queryable.ChunkQuerier(ctx, mint, maxt) + if err != nil { + return nil, err + } + queriers = append(queriers, q) + } + return storage.NewMergeChunkQuerier(nil, queriers, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil +} + +// Appender implements storage.Storage. +func (s *Storage) Appender(ctx context.Context) storage.Appender { + return s.rws.Appender(ctx) +} + +// LowestSentTimestamp returns the lowest sent timestamp across all queues. +func (s *Storage) LowestSentTimestamp() int64 { + return s.rws.LowestSentTimestamp() +} + +// Close the background processing of the storage queues. +func (s *Storage) Close() error { + s.logger.Stop() + s.mtx.Lock() + defer s.mtx.Unlock() + return s.rws.Close() +} + +func labelsToEqualityMatchers(ls model.LabelSet) []*labels.Matcher { + ms := make([]*labels.Matcher, 0, len(ls)) + for k, v := range ls { + ms = append(ms, &labels.Matcher{ + Type: labels.MatchEqual, + Name: string(k), + Value: string(v), + }) + } + return ms +} + +// Used for hashing configs and diff'ing hashes in ApplyConfig. +func toHash(data interface{}) (string, error) { + bytes, err := yaml.Marshal(data) + if err != nil { + return "", err + } + hash := md5.Sum(bytes) + return hex.EncodeToString(hash[:]), nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go new file mode 100644 index 00000000000..bd330fe8b4b --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -0,0 +1,284 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/wal" +) + +var ( + samplesIn = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "samples_in_total", + Help: "Samples in to remote storage, compare to samples out for queue managers.", + }) + exemplarsIn = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "exemplars_in_total", + Help: "Exemplars in to remote storage, compare to exemplars out for queue managers.", + }) +) + +// WriteStorage represents all the remote write storage. +type WriteStorage struct { + logger log.Logger + reg prometheus.Registerer + mtx sync.Mutex + + watcherMetrics *wal.WatcherMetrics + liveReaderMetrics *wal.LiveReaderMetrics + externalLabels labels.Labels + walDir string + queues map[string]*QueueManager + samplesIn *ewmaRate + flushDeadline time.Duration + interner *pool + scraper ReadyScrapeManager + quit chan struct{} + + // For timestampTracker. + highestTimestamp *maxTimestamp +} + +// NewWriteStorage creates and runs a WriteStorage. +func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage { + if logger == nil { + logger = log.NewNopLogger() + } + rws := &WriteStorage{ + queues: make(map[string]*QueueManager), + watcherMetrics: wal.NewWatcherMetrics(reg), + liveReaderMetrics: wal.NewLiveReaderMetrics(reg), + logger: logger, + reg: reg, + flushDeadline: flushDeadline, + samplesIn: newEWMARate(ewmaWeight, shardUpdateDuration), + walDir: walDir, + interner: newPool(), + scraper: sm, + quit: make(chan struct{}), + highestTimestamp: &maxTimestamp{ + Gauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "highest_timestamp_in_seconds", + Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.", + }), + }, + } + if reg != nil { + reg.MustRegister(rws.highestTimestamp) + } + go rws.run() + return rws +} + +func (rws *WriteStorage) run() { + ticker := time.NewTicker(shardUpdateDuration) + defer ticker.Stop() + for { + select { + case <-ticker.C: + rws.samplesIn.tick() + case <-rws.quit: + return + } + } +} + +// ApplyConfig updates the state as the new config requires. +// Only stop & create queues which have changes. +func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { + rws.mtx.Lock() + defer rws.mtx.Unlock() + + // Remote write queues only need to change if the remote write config or + // external labels change. + externalLabelUnchanged := labels.Equal(conf.GlobalConfig.ExternalLabels, rws.externalLabels) + rws.externalLabels = conf.GlobalConfig.ExternalLabels + + newQueues := make(map[string]*QueueManager) + newHashes := []string{} + for _, rwConf := range conf.RemoteWriteConfigs { + hash, err := toHash(rwConf) + if err != nil { + return err + } + + // Don't allow duplicate remote write configs. 
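+ // A queue's identity is the hash of its entire remote-write config: any + // edit yields a new hash and therefore a rebuilt queue, while an unchanged + // config (with unchanged external labels) lets the existing QueueManager be + // reused with only its client swapped.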
+ if _, ok := newQueues[hash]; ok { + return fmt.Errorf("duplicate remote write configs are not allowed, found duplicate for URL: %s", rwConf.URL) + } + + // Set the queue name to the config hash if the user has not set + // a name in their remote write config so we can still differentiate + // between queues that have the same remote write endpoint. + name := hash[:6] + if rwConf.Name != "" { + name = rwConf.Name + } + + c, err := NewWriteClient(name, &ClientConfig{ + URL: rwConf.URL, + Timeout: rwConf.RemoteTimeout, + HTTPClientConfig: rwConf.HTTPClientConfig, + SigV4Config: rwConf.SigV4Config, + Headers: rwConf.Headers, + RetryOnRateLimit: rwConf.QueueConfig.RetryOnRateLimit, + }) + if err != nil { + return err + } + + queue, ok := rws.queues[hash] + if externalLabelUnchanged && ok { + // Update the client in case any secret configuration has changed. + queue.SetClient(c) + newQueues[hash] = queue + delete(rws.queues, hash) + continue + } + + // Redacted to remove any passwords in the URL (that are + // technically accepted but not recommended) since this is + // only used for metric labels. + endpoint := rwConf.URL.Redacted() + newQueues[hash] = NewQueueManager( + newQueueManagerMetrics(rws.reg, name, endpoint), + rws.watcherMetrics, + rws.liveReaderMetrics, + rws.logger, + rws.walDir, + rws.samplesIn, + rwConf.QueueConfig, + rwConf.MetadataConfig, + conf.GlobalConfig.ExternalLabels, + rwConf.WriteRelabelConfigs, + c, + rws.flushDeadline, + rws.interner, + rws.highestTimestamp, + rws.scraper, + rwConf.SendExemplars, + ) + // Keep track of which queues are new so we know which to start. + newHashes = append(newHashes, hash) + } + + // Anything remaining in rws.queues is a queue whose config has + // changed or was removed from the overall remote write config. + for _, q := range rws.queues { + q.Stop() + } + + for _, hash := range newHashes { + newQueues[hash].Start() + } + + rws.queues = newQueues + + return nil +} + +// Appender implements storage.Storage. +func (rws *WriteStorage) Appender(_ context.Context) storage.Appender { + return &timestampTracker{ + writeStorage: rws, + highestRecvTimestamp: rws.highestTimestamp, + } +} + +// LowestSentTimestamp returns the lowest sent timestamp across all queues. +func (rws *WriteStorage) LowestSentTimestamp() int64 { + rws.mtx.Lock() + defer rws.mtx.Unlock() + + var lowestTs int64 = math.MaxInt64 + + for _, q := range rws.queues { + ts := int64(q.metrics.highestSentTimestamp.Get() * 1000) + if ts < lowestTs { + lowestTs = ts + } + } + if len(rws.queues) == 0 { + lowestTs = 0 + } + + return lowestTs +} + +// Close closes the WriteStorage. +func (rws *WriteStorage) Close() error { + rws.mtx.Lock() + defer rws.mtx.Unlock() + for _, q := range rws.queues { + q.Stop() + } + close(rws.quit) + return nil +} + +type timestampTracker struct { + writeStorage *WriteStorage + samples int64 + exemplars int64 + highestTimestamp int64 + highestRecvTimestamp *maxTimestamp +} + +// Append implements storage.Appender. +func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { + t.samples++ + if ts > t.highestTimestamp { + t.highestTimestamp = ts + } + return 0, nil +} + +func (t *timestampTracker) AppendExemplar(_ storage.SeriesRef, _ labels.Labels, _ exemplar.Exemplar) (storage.SeriesRef, error) { + t.exemplars++ + return 0, nil +} + +// Commit implements storage.Appender.
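+// Commit feeds the tracked sample and exemplar counts into the samples-in +// rate used for shard calculation and publishes the highest received +// timestamp (converted from milliseconds to seconds).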
+func (t *timestampTracker) Commit() error { + t.writeStorage.samplesIn.incr(t.samples + t.exemplars) + + samplesIn.Add(float64(t.samples)) + exemplarsIn.Add(float64(t.exemplars)) + t.highestRecvTimestamp.Set(float64(t.highestTimestamp / 1000)) + return nil +} + +// Rollback implements storage.Appender. +func (*timestampTracker) Rollback() error { + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go new file mode 100644 index 00000000000..42e1060385c --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -0,0 +1,123 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "fmt" + "net/http" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage" +) + +type writeHandler struct { + logger log.Logger + appendable storage.Appendable +} + +// NewWriteHandler creates a http.Handler that accepts remote write requests and +// writes them to the provided appendable. +func NewWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler { + return &writeHandler{ + logger: logger, + appendable: appendable, + } +} + +func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + req, err := DecodeWriteRequest(r.Body) + if err != nil { + level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + err = h.write(r.Context(), req) + switch err { + case nil: + case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: + // Indicate that an out-of-order sample is a bad request to prevent retries. + level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error()) + http.Error(w, err.Error(), http.StatusBadRequest) + return + default: + level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusNoContent) +} + +// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause.
+func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error { + switch errors.Cause(err) { + case storage.ErrNotFound: + return storage.ErrNotFound + case storage.ErrOutOfOrderExemplar: + *outOfOrderErrs++ + level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) + return nil + default: + return err + } +} + +func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { + outOfOrderExemplarErrs := 0 + + app := h.appendable.Appender(ctx) + defer func() { + if err != nil { + _ = app.Rollback() + return + } + err = app.Commit() + }() + + var exemplarErr error + for _, ts := range req.Timeseries { + labels := labelProtosToLabels(ts.Labels) + for _, s := range ts.Samples { + _, err = app.Append(0, labels, s.Timestamp, s.Value) + if err != nil { + return err + } + + } + + for _, ep := range ts.Exemplars { + e := exemplarProtoToExemplar(ep) + + _, exemplarErr = app.AppendExemplar(0, labels, e) + exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs) + if exemplarErr != nil { + // Since exemplar storage is still experimental, we don't fail the request on ingestion errors. + level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) + } + } + } + + if outOfOrderExemplarErrs > 0 { + _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + } + + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go new file mode 100644 index 00000000000..12a4047396f --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -0,0 +1,318 @@ +// Copyright 2018 The Prometheus Authors + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package record contains the various record types used for encoding various Head block data in the WAL and in-memory snapshot. +package record + +import ( + "math" + "sort" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/encoding" + "github.com/prometheus/prometheus/tsdb/tombstones" +) + +// Type represents the data type of a record. +type Type uint8 + +const ( + // Unknown is returned for unrecognised WAL record types. + Unknown Type = 255 + // Series is used to match WAL records of type Series. + Series Type = 1 + // Samples is used to match WAL records of type Samples. + Samples Type = 2 + // Tombstones is used to match WAL records of type Tombstones. + Tombstones Type = 3 + // Exemplars is used to match WAL records of type Exemplars. + Exemplars Type = 4 +) + +// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. 
+var ErrNotFound = errors.New("not found") + +// RefSeries is the series labels with the series ID. +type RefSeries struct { + Ref chunks.HeadSeriesRef + Labels labels.Labels +} + +// RefSample is a timestamp/value pair associated with a reference to a series. +type RefSample struct { + Ref chunks.HeadSeriesRef + T int64 + V float64 +} + +// RefExemplar is an exemplar with its labels, timestamp, the value it was collected/observed with, and a reference to a series. +type RefExemplar struct { + Ref chunks.HeadSeriesRef + T int64 + V float64 + Labels labels.Labels +} + +// Decoder decodes series, sample, and tombstone records. +// The zero value is ready to use. +type Decoder struct{} + +// Type returns the type of the record. +// Returns Unknown if no valid record type is found. +func (d *Decoder) Type(rec []byte) Type { + if len(rec) < 1 { + return Unknown + } + switch t := Type(rec[0]); t { + case Series, Samples, Tombstones, Exemplars: + return t + } + return Unknown +} + +// Series appends series in rec to the given slice. +func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != Series { + return nil, errors.New("invalid record type") + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := storage.SeriesRef(dec.Be64()) + + lset := make(labels.Labels, dec.Uvarint()) + + for i := range lset { + lset[i].Name = dec.UvarintStr() + lset[i].Value = dec.UvarintStr() + } + sort.Sort(lset) + + series = append(series, RefSeries{ + Ref: chunks.HeadSeriesRef(ref), + Labels: lset, + }) + } + if dec.Err() != nil { + return nil, dec.Err() + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return series, nil +} + +// Samples appends samples in rec to the given slice. +func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != Samples { + return nil, errors.New("invalid record type") + } + if dec.Len() == 0 { + return samples, nil + } + var ( + baseRef = dec.Be64() + baseTime = dec.Be64int64() + ) + for len(dec.B) > 0 && dec.Err() == nil { + dref := dec.Varint64() + dtime := dec.Varint64() + val := dec.Be64() + + samples = append(samples, RefSample{ + Ref: chunks.HeadSeriesRef(int64(baseRef) + dref), + T: baseTime + dtime, + V: math.Float64frombits(val), + }) + } + + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d samples", len(samples)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return samples, nil +} + +// Tombstones appends tombstones in rec to the given slice.
+func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) { + dec := encoding.Decbuf{B: rec} + + if Type(dec.Byte()) != Tombstones { + return nil, errors.New("invalid record type") + } + for dec.Len() > 0 && dec.Err() == nil { + tstones = append(tstones, tombstones.Stone{ + Ref: storage.SeriesRef(dec.Be64()), + Intervals: tombstones.Intervals{ + {Mint: dec.Varint64(), Maxt: dec.Varint64()}, + }, + }) + } + if dec.Err() != nil { + return nil, dec.Err() + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return tstones, nil +} + +func (d *Decoder) Exemplars(rec []byte, exemplars []RefExemplar) ([]RefExemplar, error) { + dec := encoding.Decbuf{B: rec} + t := Type(dec.Byte()) + if t != Exemplars { + return nil, errors.New("invalid record type") + } + + return d.ExemplarsFromBuffer(&dec, exemplars) +} + +func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemplar) ([]RefExemplar, error) { + if dec.Len() == 0 { + return exemplars, nil + } + var ( + baseRef = dec.Be64() + baseTime = dec.Be64int64() + ) + for len(dec.B) > 0 && dec.Err() == nil { + dref := dec.Varint64() + dtime := dec.Varint64() + val := dec.Be64() + + lset := make(labels.Labels, dec.Uvarint()) + for i := range lset { + lset[i].Name = dec.UvarintStr() + lset[i].Value = dec.UvarintStr() + } + sort.Sort(lset) + + exemplars = append(exemplars, RefExemplar{ + Ref: chunks.HeadSeriesRef(baseRef + uint64(dref)), + T: baseTime + dtime, + V: math.Float64frombits(val), + Labels: lset, + }) + } + + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d exemplars", len(exemplars)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return exemplars, nil +} + +// Encoder encodes series, sample, and tombstones records. +// The zero value is ready to use. +type Encoder struct{} + +// Series appends the encoded series to b and returns the resulting slice. +func (e *Encoder) Series(series []RefSeries, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(Series)) + + for _, s := range series { + buf.PutBE64(uint64(s.Ref)) + buf.PutUvarint(len(s.Labels)) + + for _, l := range s.Labels { + buf.PutUvarintStr(l.Name) + buf.PutUvarintStr(l.Value) + } + } + return buf.Get() +} + +// Samples appends the encoded samples to b and returns the resulting slice. +func (e *Encoder) Samples(samples []RefSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(Samples)) + + if len(samples) == 0 { + return buf.Get() + } + + // Store base timestamp and base reference number of first sample. + // All samples encode their timestamp and ref as delta to those. + first := samples[0] + + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, s := range samples { + buf.PutVarint64(int64(s.Ref) - int64(first.Ref)) + buf.PutVarint64(s.T - first.T) + buf.PutBE64(math.Float64bits(s.V)) + } + return buf.Get() +} + +// Tombstones appends the encoded tombstones to b and returns the resulting slice. 
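+// Each interval is encoded as the series ref (big-endian uint64) followed by +// mint and maxt as varints, after the single leading record-type byte.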
+func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(Tombstones)) + + for _, s := range tstones { + for _, iv := range s.Intervals { + buf.PutBE64(uint64(s.Ref)) + buf.PutVarint64(iv.Mint) + buf.PutVarint64(iv.Maxt) + } + } + return buf.Get() +} + +func (e *Encoder) Exemplars(exemplars []RefExemplar, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(Exemplars)) + + if len(exemplars) == 0 { + return buf.Get() + } + + e.EncodeExemplarsIntoBuffer(exemplars, &buf) + + return buf.Get() +} + +func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encoding.Encbuf) { + // Store base timestamp and base reference number of first sample. + // All samples encode their timestamp and ref as delta to those. + first := exemplars[0] + + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, ex := range exemplars { + buf.PutVarint64(int64(ex.Ref) - int64(first.Ref)) + buf.PutVarint64(ex.T - first.T) + buf.PutBE64(math.Float64bits(ex.V)) + + buf.PutUvarint(len(ex.Labels)) + for _, l := range ex.Labels { + buf.PutUvarintStr(l.Name) + buf.PutUvarintStr(l.Value) + } + } +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go new file mode 100644 index 00000000000..621e104714d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go @@ -0,0 +1,373 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tombstones + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/crc32" + "io/ioutil" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/encoding" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" +) + +const TombstonesFilename = "tombstones" + +const ( + // MagicTombstone is 4 bytes at the head of a tombstone file. + MagicTombstone = 0x0130BA30 + + tombstoneFormatV1 = 1 + tombstoneFormatVersionSize = 1 + tombstonesHeaderSize = 5 + tombstonesCRCSize = 4 +) + +// The table gets initialized with sync.Once but may still cause a race +// with any other use of the crc32 package anywhere. Thus we initialize it +// before. +var castagnoliTable *crc32.Table + +func init() { + castagnoliTable = crc32.MakeTable(crc32.Castagnoli) +} + +// newCRC32 initializes a CRC32 hash with a preconfigured polynomial, so the +// polynomial may be easily changed in one location at a later time, if necessary. +func newCRC32() hash.Hash32 { + return crc32.New(castagnoliTable) +} + +// Reader gives access to tombstone intervals by series reference. +type Reader interface { + // Get returns deletion intervals for the series with the given reference. 
+ Get(ref storage.SeriesRef) (Intervals, error) + + // Iter calls the given function for each encountered interval. + Iter(func(storage.SeriesRef, Intervals) error) error + + // Total returns the total count of tombstones. + Total() uint64 + + // Close any underlying resources + Close() error +} + +func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { + path := filepath.Join(dir, TombstonesFilename) + tmp := path + ".tmp" + hash := newCRC32() + var size int + + f, err := os.Create(tmp) + if err != nil { + return 0, err + } + defer func() { + if f != nil { + if err := f.Close(); err != nil { + level.Error(logger).Log("msg", "close tmp file", "err", err.Error()) + } + } + if err := os.RemoveAll(tmp); err != nil { + level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + } + }() + + buf := encoding.Encbuf{B: make([]byte, 3*binary.MaxVarintLen64)} + buf.Reset() + // Write the meta. + buf.PutBE32(MagicTombstone) + n, err := f.Write(buf.Get()) + if err != nil { + return 0, err + } + size += n + + bytes, err := Encode(tr) + if err != nil { + return 0, errors.Wrap(err, "encoding tombstones") + } + + // Ignore first byte which is the format type. We do this for compatibility. + if _, err := hash.Write(bytes[tombstoneFormatVersionSize:]); err != nil { + return 0, errors.Wrap(err, "calculating hash for tombstones") + } + + n, err = f.Write(bytes) + if err != nil { + return 0, errors.Wrap(err, "writing tombstones") + } + size += n + + n, err = f.Write(hash.Sum(nil)) + if err != nil { + return 0, err + } + size += n + + if err := f.Sync(); err != nil { + return 0, tsdb_errors.NewMulti(err, f.Close()).Err() + } + + if err = f.Close(); err != nil { + return 0, err + } + f = nil + return int64(size), fileutil.Replace(tmp, path) +} + +// Encode encodes the tombstones from the reader. +// It does not attach any magic number or checksum. +func Encode(tr Reader) ([]byte, error) { + buf := encoding.Encbuf{} + buf.PutByte(tombstoneFormatV1) + err := tr.Iter(func(ref storage.SeriesRef, ivs Intervals) error { + for _, iv := range ivs { + buf.PutUvarint64(uint64(ref)) + buf.PutVarint64(iv.Mint) + buf.PutVarint64(iv.Maxt) + } + return nil + }) + return buf.Get(), err +} + +// Decode decodes the tombstones from the bytes +// which was encoded using the Encode method. +func Decode(b []byte) (Reader, error) { + d := &encoding.Decbuf{B: b} + if flag := d.Byte(); flag != tombstoneFormatV1 { + return nil, errors.Errorf("invalid tombstone format %x", flag) + } + + if d.Err() != nil { + return nil, d.Err() + } + + stonesMap := NewMemTombstones() + for d.Len() > 0 { + k := storage.SeriesRef(d.Uvarint64()) + mint := d.Varint64() + maxt := d.Varint64() + if d.Err() != nil { + return nil, d.Err() + } + + stonesMap.AddInterval(k, Interval{mint, maxt}) + } + return stonesMap, nil +} + +// Stone holds the information on the posting and time-range +// that is deleted. +type Stone struct { + Ref storage.SeriesRef + Intervals Intervals +} + +func ReadTombstones(dir string) (Reader, int64, error) { + b, err := ioutil.ReadFile(filepath.Join(dir, TombstonesFilename)) + if os.IsNotExist(err) { + return NewMemTombstones(), 0, nil + } else if err != nil { + return nil, 0, err + } + + if len(b) < tombstonesHeaderSize { + return nil, 0, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") + } + + d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]} + if mg := d.Be32(); mg != MagicTombstone { + return nil, 0, fmt.Errorf("invalid magic number %x", mg) + } + + // Verify checksum. 
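+	// (On-disk layout: 4-byte magic, 1-byte format version, the encoded
+	// stones, then a trailing 4-byte Castagnoli CRC32 computed over the stone
+	// data only; the magic and version bytes are excluded from the checksum.)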
+ hash := newCRC32() + // Ignore first byte which is the format type. + if _, err := hash.Write(d.Get()[tombstoneFormatVersionSize:]); err != nil { + return nil, 0, errors.Wrap(err, "write to hash") + } + if binary.BigEndian.Uint32(b[len(b)-tombstonesCRCSize:]) != hash.Sum32() { + return nil, 0, errors.New("checksum did not match") + } + + if d.Err() != nil { + return nil, 0, d.Err() + } + + stonesMap, err := Decode(d.Get()) + if err != nil { + return nil, 0, err + } + + return stonesMap, int64(len(b)), nil +} + +type MemTombstones struct { + intvlGroups map[storage.SeriesRef]Intervals + mtx sync.RWMutex +} + +// NewMemTombstones creates new in memory Tombstone Reader +// that allows adding new intervals. +func NewMemTombstones() *MemTombstones { + return &MemTombstones{intvlGroups: make(map[storage.SeriesRef]Intervals)} +} + +func NewTestMemTombstones(intervals []Intervals) *MemTombstones { + ret := NewMemTombstones() + for i, intervalsGroup := range intervals { + for _, interval := range intervalsGroup { + ret.AddInterval(storage.SeriesRef(i+1), interval) + } + } + return ret +} + +func (t *MemTombstones) Get(ref storage.SeriesRef) (Intervals, error) { + t.mtx.RLock() + defer t.mtx.RUnlock() + return t.intvlGroups[ref], nil +} + +func (t *MemTombstones) DeleteTombstones(refs map[storage.SeriesRef]struct{}) { + t.mtx.Lock() + defer t.mtx.Unlock() + for ref := range refs { + delete(t.intvlGroups, ref) + } +} + +func (t *MemTombstones) TruncateBefore(beforeT int64) { + t.mtx.Lock() + defer t.mtx.Unlock() + for ref, ivs := range t.intvlGroups { + i := len(ivs) - 1 + for ; i >= 0; i-- { + if beforeT > ivs[i].Maxt { + break + } + } + if len(ivs[i+1:]) == 0 { + delete(t.intvlGroups, ref) + } else { + newIvs := make(Intervals, len(ivs[i+1:])) + copy(newIvs, ivs[i+1:]) + t.intvlGroups[ref] = newIvs + } + } +} + +func (t *MemTombstones) Iter(f func(storage.SeriesRef, Intervals) error) error { + t.mtx.RLock() + defer t.mtx.RUnlock() + for ref, ivs := range t.intvlGroups { + if err := f(ref, ivs); err != nil { + return err + } + } + return nil +} + +func (t *MemTombstones) Total() uint64 { + t.mtx.RLock() + defer t.mtx.RUnlock() + + total := uint64(0) + for _, ivs := range t.intvlGroups { + total += uint64(len(ivs)) + } + return total +} + +// AddInterval to an existing memTombstones. +func (t *MemTombstones) AddInterval(ref storage.SeriesRef, itvs ...Interval) { + t.mtx.Lock() + defer t.mtx.Unlock() + for _, itv := range itvs { + t.intvlGroups[ref] = t.intvlGroups[ref].Add(itv) + } +} + +func (*MemTombstones) Close() error { + return nil +} + +// Interval represents a single time-interval. +type Interval struct { + Mint, Maxt int64 +} + +func (tr Interval) InBounds(t int64) bool { + return t >= tr.Mint && t <= tr.Maxt +} + +func (tr Interval) IsSubrange(dranges Intervals) bool { + for _, r := range dranges { + if r.InBounds(tr.Mint) && r.InBounds(tr.Maxt) { + return true + } + } + + return false +} + +// Intervals represents a set of increasing and non-overlapping time-intervals. +type Intervals []Interval + +// Add the new time-range to the existing ones. +// The existing ones must be sorted. +func (in Intervals) Add(n Interval) Intervals { + if len(in) == 0 { + return append(in, n) + } + // Find min and max indexes of intervals that overlap with the new interval. + // Intervals are closed [t1, t2] and t is discreet, so if neighbour intervals are 1 step difference + // to the new one, we can merge those together. 
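+	// For example, adding [5, 7] to {[1, 3], [8, 10]} yields {[1, 3], [5, 10]}:
+	// [5, 7] is one step from [8, 10], so the two merge, while [1, 3] is
+	// untouched.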
+ mini := sort.Search(len(in), func(i int) bool { return in[i].Maxt >= n.Mint-1 }) + if mini == len(in) { + return append(in, n) + } + + maxi := sort.Search(len(in)-mini, func(i int) bool { return in[mini+i].Mint > n.Maxt+1 }) + if maxi == 0 { + if mini == 0 { + return append(Intervals{n}, in...) + } + return append(in[:mini], append(Intervals{n}, in[mini:]...)...) + } + + if n.Mint < in[mini].Mint { + in[mini].Mint = n.Mint + } + in[mini].Maxt = in[maxi+mini-1].Maxt + if n.Maxt > in[mini].Maxt { + in[mini].Maxt = n.Maxt + } + return append(in[:mini+1], in[maxi+mini:]...) +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go new file mode 100644 index 00000000000..9064beed061 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go @@ -0,0 +1,329 @@ +// Copyright 2018 The Prometheus Authors + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/tsdb/chunks" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/prometheus/prometheus/tsdb/fileutil" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/tombstones" +) + +// CheckpointStats returns stats about a created checkpoint. +type CheckpointStats struct { + DroppedSeries int + DroppedSamples int + DroppedTombstones int + DroppedExemplars int + TotalSeries int // Processed series including dropped ones. + TotalSamples int // Processed samples including dropped ones. + TotalTombstones int // Processed tombstones including dropped ones. + TotalExemplars int // Processed exemplars including dropped ones. +} + +// LastCheckpoint returns the directory name and index of the most recent checkpoint. +// If dir does not contain any checkpoints, ErrNotFound is returned. +func LastCheckpoint(dir string) (string, int, error) { + checkpoints, err := listCheckpoints(dir) + if err != nil { + return "", 0, err + } + + if len(checkpoints) == 0 { + return "", 0, record.ErrNotFound + } + + checkpoint := checkpoints[len(checkpoints)-1] + return filepath.Join(dir, checkpoint.name), checkpoint.index, nil +} + +// DeleteCheckpoints deletes all checkpoints in a directory below a given index. +func DeleteCheckpoints(dir string, maxIndex int) error { + checkpoints, err := listCheckpoints(dir) + if err != nil { + return err + } + + errs := tsdb_errors.NewMulti() + for _, checkpoint := range checkpoints { + if checkpoint.index >= maxIndex { + break + } + errs.Add(os.RemoveAll(filepath.Join(dir, checkpoint.name))) + } + return errs.Err() +} + +const checkpointPrefix = "checkpoint." + +// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL. +// It includes the most recent checkpoint if it exists. 
+// All series not satisfying keep and samples/tombstones/exemplars below mint are dropped. +// +// The checkpoint is stored in a directory named checkpoint.N in the same +// segmented format as the original WAL itself. +// This makes it easy to read it through the WAL package and concatenate +// it with the original WAL. +func Checkpoint(logger log.Logger, w *WAL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { + stats := &CheckpointStats{} + var sgmReader io.ReadCloser + + level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) + + { + var sgmRange []SegmentRange + dir, idx, err := LastCheckpoint(w.Dir()) + if err != nil && err != record.ErrNotFound { + return nil, errors.Wrap(err, "find last checkpoint") + } + last := idx + 1 + if err == nil { + if from > last { + return nil, fmt.Errorf("unexpected gap to last checkpoint. expected:%v, requested:%v", last, from) + } + // Ignore WAL files below the checkpoint. They shouldn't exist to begin with. + from = last + + sgmRange = append(sgmRange, SegmentRange{Dir: dir, Last: math.MaxInt32}) + } + + sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to}) + sgmReader, err = NewSegmentsRangeReader(sgmRange...) + if err != nil { + return nil, errors.Wrap(err, "create segment reader") + } + defer sgmReader.Close() + } + + cpdir := checkpointDir(w.Dir(), to) + cpdirtmp := cpdir + ".tmp" + + if err := os.RemoveAll(cpdirtmp); err != nil { + return nil, errors.Wrap(err, "remove previous temporary checkpoint dir") + } + + if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { + return nil, errors.Wrap(err, "create checkpoint dir") + } + cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled()) + if err != nil { + return nil, errors.Wrap(err, "open checkpoint") + } + + // Ensures that an early return caused by an error doesn't leave any tmp files. + defer func() { + cp.Close() + os.RemoveAll(cpdirtmp) + }() + + r := NewReader(sgmReader) + + var ( + series []record.RefSeries + samples []record.RefSample + tstones []tombstones.Stone + exemplars []record.RefExemplar + dec record.Decoder + enc record.Encoder + buf []byte + recs [][]byte + ) + for r.Next() { + series, samples, tstones, exemplars = series[:0], samples[:0], tstones[:0], exemplars[:0] + + // We don't reset the buffer since we batch up multiple records + // before writing them to the checkpoint. + // Remember where the record for this iteration starts. + start := len(buf) + rec := r.Record() + + switch dec.Type(rec) { + case record.Series: + series, err = dec.Series(rec, series) + if err != nil { + return nil, errors.Wrap(err, "decode series") + } + // Drop irrelevant series in place. + repl := series[:0] + for _, s := range series { + if keep(s.Ref) { + repl = append(repl, s) + } + } + if len(repl) > 0 { + buf = enc.Series(repl, buf) + } + stats.TotalSeries += len(series) + stats.DroppedSeries += len(series) - len(repl) + + case record.Samples: + samples, err = dec.Samples(rec, samples) + if err != nil { + return nil, errors.Wrap(err, "decode samples") + } + // Drop irrelevant samples in place. 
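+			// (repl reuses samples' backing array, so this filter allocates
+			// nothing; it is safe because the write position never overtakes
+			// the read position.)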
+ repl := samples[:0] + for _, s := range samples { + if s.T >= mint { + repl = append(repl, s) + } + } + if len(repl) > 0 { + buf = enc.Samples(repl, buf) + } + stats.TotalSamples += len(samples) + stats.DroppedSamples += len(samples) - len(repl) + + case record.Tombstones: + tstones, err = dec.Tombstones(rec, tstones) + if err != nil { + return nil, errors.Wrap(err, "decode deletes") + } + // Drop irrelevant tombstones in place. + repl := tstones[:0] + for _, s := range tstones { + for _, iv := range s.Intervals { + if iv.Maxt >= mint { + repl = append(repl, s) + break + } + } + } + if len(repl) > 0 { + buf = enc.Tombstones(repl, buf) + } + stats.TotalTombstones += len(tstones) + stats.DroppedTombstones += len(tstones) - len(repl) + + case record.Exemplars: + exemplars, err = dec.Exemplars(rec, exemplars) + if err != nil { + return nil, errors.Wrap(err, "decode exemplars") + } + // Drop irrelevant exemplars in place. + repl := exemplars[:0] + for _, e := range exemplars { + if e.T >= mint { + repl = append(repl, e) + } + } + if len(repl) > 0 { + buf = enc.Exemplars(repl, buf) + } + stats.TotalExemplars += len(exemplars) + stats.DroppedExemplars += len(exemplars) - len(repl) + default: + // Unknown record type, probably from a future Prometheus version. + continue + } + if len(buf[start:]) == 0 { + continue // All contents discarded. + } + recs = append(recs, buf[start:]) + + // Flush records in 1 MB increments. + if len(buf) > 1*1024*1024 { + if err := cp.Log(recs...); err != nil { + return nil, errors.Wrap(err, "flush records") + } + buf, recs = buf[:0], recs[:0] + } + } + // If we hit any corruption during checkpointing, repairing is not an option. + // The head won't know which series records are lost. + if r.Err() != nil { + return nil, errors.Wrap(r.Err(), "read segments") + } + + // Flush remaining records. + if err := cp.Log(recs...); err != nil { + return nil, errors.Wrap(err, "flush records") + } + if err := cp.Close(); err != nil { + return nil, errors.Wrap(err, "close checkpoint") + } + + // Sync temporary directory before rename. 
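+	// (Syncing the directory makes the checkpoint's directory entries durable
+	// before the atomic rename publishes it; without this, a crash could leave
+	// a visible checkpoint whose files never reached disk.)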
+ df, err := fileutil.OpenDir(cpdirtmp) + if err != nil { + return nil, errors.Wrap(err, "open temporary checkpoint directory") + } + if err := df.Sync(); err != nil { + df.Close() + return nil, errors.Wrap(err, "sync temporary checkpoint directory") + } + if err = df.Close(); err != nil { + return nil, errors.Wrap(err, "close temporary checkpoint directory") + } + + if err := fileutil.Replace(cpdirtmp, cpdir); err != nil { + return nil, errors.Wrap(err, "rename checkpoint directory") + } + + return stats, nil +} + +func checkpointDir(dir string, i int) string { + return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i)) +} + +type checkpointRef struct { + name string + index int +} + +func listCheckpoints(dir string) (refs []checkpointRef, err error) { + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + for i := 0; i < len(files); i++ { + fi := files[i] + if !strings.HasPrefix(fi.Name(), checkpointPrefix) { + continue + } + if !fi.IsDir() { + return nil, errors.Errorf("checkpoint %s is not a directory", fi.Name()) + } + idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) + if err != nil { + continue + } + + refs = append(refs, checkpointRef{name: fi.Name(), index: idx}) + } + + sort.Slice(refs, func(i, j int) bool { + return refs[i].index < refs[j].index + }) + + return refs, nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go new file mode 100644 index 00000000000..f09d149aa39 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go @@ -0,0 +1,322 @@ +// Copyright 2019 The Prometheus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "encoding/binary" + "fmt" + "hash/crc32" + "io" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/golang/snappy" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +// LiveReaderMetrics holds all metrics exposed by the LiveReader. +type LiveReaderMetrics struct { + readerCorruptionErrors *prometheus.CounterVec +} + +// NewLiveReaderMetrics instantiates, registers and returns metrics to be injected +// at LiveReader instantiation. +func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { + m := &LiveReaderMetrics{ + readerCorruptionErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_reader_corruption_errors_total", + Help: "Errors encountered when reading the WAL.", + }, []string{"error"}), + } + + if reg != nil { + reg.MustRegister(m.readerCorruptionErrors) + } + + return m +} + +// NewLiveReader returns a new live reader. +func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { + lr := &LiveReader{ + logger: logger, + rdr: r, + metrics: metrics, + + // Until we understand how they come about, make readers permissive + // to records spanning pages. 
+		permissive: true,
+	}
+
+	return lr
+}
+
+// LiveReader reads WAL records from an io.Reader. It allows reading of WALs
+// that are still in the process of being written, and returns records as soon
+// as they can be read.
+type LiveReader struct {
+	logger     log.Logger
+	rdr        io.Reader
+	err        error
+	rec        []byte
+	snappyBuf  []byte
+	hdr        [recordHeaderSize]byte
+	buf        [pageSize]byte
+	readIndex  int   // Index in buf to start at for next read.
+	writeIndex int   // Index in buf to start at for next write.
+	total      int64 // Total bytes processed during reading in calls to Next().
+	index      int   // Used to track partial records, should be 0 at the start of every new record.
+
+	// For testing, we can treat EOF as a non-error.
+	eofNonErr bool
+
+	// We sometimes see records span page boundaries. This should never happen,
+	// but it does. Until we track down why, set permissive to true to tolerate
+	// it. NB the non-live Reader implementation allows for this.
+	permissive bool
+
+	metrics *LiveReaderMetrics
+}
+
+// Err returns any errors encountered reading the WAL. io.EOFs are not terminal
+// and Next can be tried again. Non-EOFs are terminal, and the reader should
+// not be used again. It is up to the user to decide when to stop trying should
+// io.EOF be returned.
+func (r *LiveReader) Err() error {
+	if r.eofNonErr && r.err == io.EOF {
+		return nil
+	}
+	return r.err
+}
+
+// Offset returns the number of bytes consumed from this segment.
+func (r *LiveReader) Offset() int64 {
+	return r.total
+}
+
+func (r *LiveReader) fillBuffer() (int, error) {
+	n, err := r.rdr.Read(r.buf[r.writeIndex:len(r.buf)])
+	r.writeIndex += n
+	return n, err
+}
+
+// Next returns true if Record() will contain a full record.
+// If Next returns false, you should always check the contents of Err().
+// A false return guarantees there are no more records if the segment is
+// closed and not corrupt; otherwise, if Err() == io.EOF, you should try
+// again once more data has been written.
+func (r *LiveReader) Next() bool {
+	for {
+		// If buildRecord returns a non-EOF error, it's game over - the segment
+		// is corrupt. If buildRecord returns an EOF, we try and read more in
+		// fillBuffer later on. If that fails to read anything (n=0 && err=EOF),
+		// we return EOF and the user can try again later. If we have a full
+		// page, buildRecord is guaranteed to return a record or a non-EOF; it
+		// checks that records fit in pages.
+		if ok, err := r.buildRecord(); ok {
+			return true
+		} else if err != nil && err != io.EOF {
+			r.err = err
+			return false
+		}
+
+		// If we've filled the page and not found a record, this means records
+		// have started to span pages. That shouldn't happen, but it does, and
+		// until we find out why we need to deal with it.
+		if r.permissive && r.writeIndex == pageSize && r.readIndex > 0 {
+			copy(r.buf[:], r.buf[r.readIndex:])
+			r.writeIndex -= r.readIndex
+			r.readIndex = 0
+			continue
+		}
+
+		if r.readIndex == pageSize {
+			r.writeIndex = 0
+			r.readIndex = 0
+		}
+
+		if r.writeIndex != pageSize {
+			n, err := r.fillBuffer()
+			if n == 0 || (err != nil && err != io.EOF) {
+				r.err = err
+				return false
+			}
+		}
+	}
+}
+
+// Record returns the current record.
+// The returned byte slice is only valid until the next call to Next.
+func (r *LiveReader) Record() []byte {
+	return r.rec
+}
+
+// Rebuild a full record from potentially partial records. Returns false
+// if there was an error or if we weren't able to read a record for any reason.
+// Returns true if we read a full record.
Any record data is appended to +// LiveReader.rec +func (r *LiveReader) buildRecord() (bool, error) { + for { + // Check that we have data in the internal buffer to read. + if r.writeIndex <= r.readIndex { + return false, nil + } + + // Attempt to read a record, partial or otherwise. + temp, n, err := r.readRecord() + if err != nil { + return false, err + } + + r.readIndex += n + r.total += int64(n) + if temp == nil { + return false, nil + } + + rt := recTypeFromHeader(r.hdr[0]) + if rt == recFirst || rt == recFull { + r.rec = r.rec[:0] + r.snappyBuf = r.snappyBuf[:0] + } + + compressed := r.hdr[0]&snappyMask != 0 + if compressed { + r.snappyBuf = append(r.snappyBuf, temp...) + } else { + r.rec = append(r.rec, temp...) + } + + if err := validateRecord(rt, r.index); err != nil { + r.index = 0 + return false, err + } + if rt == recLast || rt == recFull { + r.index = 0 + if compressed && len(r.snappyBuf) > 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. + r.rec = r.rec[:cap(r.rec)] + r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + if err != nil { + return false, err + } + } + return true, nil + } + // Only increment i for non-zero records since we use it + // to determine valid content record sequences. + r.index++ + } +} + +// Returns an error if the recType and i indicate an invalid record sequence. +// As an example, if i is > 0 because we've read some amount of a partial record +// (recFirst, recMiddle, etc. but not recLast) and then we get another recFirst or recFull +// instead of a recLast or recMiddle we would have an invalid record. +func validateRecord(typ recType, i int) error { + switch typ { + case recFull: + if i != 0 { + return errors.New("unexpected full record") + } + return nil + case recFirst: + if i != 0 { + return errors.New("unexpected first record, dropping buffer") + } + return nil + case recMiddle: + if i == 0 { + return errors.New("unexpected middle record, dropping buffer") + } + return nil + case recLast: + if i == 0 { + return errors.New("unexpected last record, dropping buffer") + } + return nil + default: + return errors.Errorf("unexpected record type %d", typ) + } +} + +// Read a sub-record (see recType) from the buffer. It could potentially +// be a full record (recFull) if the record fits within the bounds of a single page. +// Returns a byte slice of the record data read, the number of bytes read, and an error +// if there's a non-zero byte in a page term record or the record checksum fails. +// This is a non-method function to make it clear it does not mutate the reader. +func (r *LiveReader) readRecord() ([]byte, int, error) { + // Special case: for recPageTerm, check that are all zeros to end of page, + // consume them but don't return them. + if r.buf[r.readIndex] == byte(recPageTerm) { + // End of page won't necessarily be end of buffer, as we may have + // got misaligned by records spanning page boundaries. + // r.total % pageSize is the offset into the current page + // that r.readIndex points to in buf. Therefore + // pageSize - (r.total % pageSize) is the amount left to read of + // the current page. 
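+	// For example, with pageSize = 32768 and r.total = 70000, the offset into
+	// the page is 70000 % 32768 = 4464, so 32768 - 4464 = 28304 padding bytes
+	// remain to be consumed.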
+ remaining := int(pageSize - (r.total % pageSize)) + if r.readIndex+remaining > r.writeIndex { + return nil, 0, io.EOF + } + + for i := r.readIndex; i < r.readIndex+remaining; i++ { + if r.buf[i] != 0 { + return nil, 0, errors.New("unexpected non-zero byte in page term bytes") + } + } + + return nil, remaining, nil + } + + // Not a recPageTerm; read the record and check the checksum. + if r.writeIndex-r.readIndex < recordHeaderSize { + return nil, 0, io.EOF + } + + copy(r.hdr[:], r.buf[r.readIndex:r.readIndex+recordHeaderSize]) + length := int(binary.BigEndian.Uint16(r.hdr[1:])) + crc := binary.BigEndian.Uint32(r.hdr[3:]) + if r.readIndex+recordHeaderSize+length > pageSize { + if !r.permissive { + return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize) + } + r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc() + level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) + } + if recordHeaderSize+length > pageSize { + return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize) + } + if r.readIndex+recordHeaderSize+length > r.writeIndex { + return nil, 0, io.EOF + } + + rec := r.buf[r.readIndex+recordHeaderSize : r.readIndex+recordHeaderSize+length] + if c := crc32.Checksum(rec, castagnoliTable); c != crc { + return nil, 0, errors.Errorf("unexpected checksum %x, expected %x", c, crc) + } + + return rec, length + recordHeaderSize, nil +} + +func min(i, j int) int { + if i < j { + return i + } + return j +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go new file mode 100644 index 00000000000..7612f8775fa --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go @@ -0,0 +1,200 @@ +// Copyright 2019 The Prometheus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "encoding/binary" + "hash/crc32" + "io" + + "github.com/golang/snappy" + "github.com/pkg/errors" +) + +// Reader reads WAL records from an io.Reader. +type Reader struct { + rdr io.Reader + err error + rec []byte + snappyBuf []byte + buf [pageSize]byte + total int64 // Total bytes processed. + curRecTyp recType // Used for checking that the last record is not torn. +} + +// NewReader returns a new reader. +func NewReader(r io.Reader) *Reader { + return &Reader{rdr: r} +} + +// Next advances the reader to the next records and returns true if it exists. +// It must not be called again after it returned false. +func (r *Reader) Next() bool { + err := r.next() + if errors.Cause(err) == io.EOF { + // The last WAL segment record shouldn't be torn(should be full or last). + // The last record would be torn after a crash just before + // the last record part could be persisted to disk. 
+ if r.curRecTyp == recFirst || r.curRecTyp == recMiddle { + r.err = errors.New("last record is torn") + } + return false + } + r.err = err + return r.err == nil +} + +func (r *Reader) next() (err error) { + // We have to use r.buf since allocating byte arrays here fails escape + // analysis and ends up on the heap, even though it seemingly should not. + hdr := r.buf[:recordHeaderSize] + buf := r.buf[recordHeaderSize:] + + r.rec = r.rec[:0] + r.snappyBuf = r.snappyBuf[:0] + + i := 0 + for { + if _, err = io.ReadFull(r.rdr, hdr[:1]); err != nil { + return errors.Wrap(err, "read first header byte") + } + r.total++ + r.curRecTyp = recTypeFromHeader(hdr[0]) + compressed := hdr[0]&snappyMask != 0 + + // Gobble up zero bytes. + if r.curRecTyp == recPageTerm { + // recPageTerm is a single byte that indicates the rest of the page is padded. + // If it's the first byte in a page, buf is too small and + // needs to be resized to fit pageSize-1 bytes. + buf = r.buf[1:] + + // We are pedantic and check whether the zeros are actually up + // to a page boundary. + // It's not strictly necessary but may catch sketchy state early. + k := pageSize - (r.total % pageSize) + if k == pageSize { + continue // Initial 0 byte was last page byte. + } + n, err := io.ReadFull(r.rdr, buf[:k]) + if err != nil { + return errors.Wrap(err, "read remaining zeros") + } + r.total += int64(n) + + for _, c := range buf[:k] { + if c != 0 { + return errors.New("unexpected non-zero byte in padded page") + } + } + continue + } + n, err := io.ReadFull(r.rdr, hdr[1:]) + if err != nil { + return errors.Wrap(err, "read remaining header") + } + r.total += int64(n) + + var ( + length = binary.BigEndian.Uint16(hdr[1:]) + crc = binary.BigEndian.Uint32(hdr[3:]) + ) + + if length > pageSize-recordHeaderSize { + return errors.Errorf("invalid record size %d", length) + } + n, err = io.ReadFull(r.rdr, buf[:length]) + if err != nil { + return err + } + r.total += int64(n) + + if n != int(length) { + return errors.Errorf("invalid size: expected %d, got %d", length, n) + } + if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc { + return errors.Errorf("unexpected checksum %x, expected %x", c, crc) + } + + if compressed { + r.snappyBuf = append(r.snappyBuf, buf[:length]...) + } else { + r.rec = append(r.rec, buf[:length]...) + } + + if err := validateRecord(r.curRecTyp, i); err != nil { + return err + } + if r.curRecTyp == recLast || r.curRecTyp == recFull { + if compressed && len(r.snappyBuf) > 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. + r.rec = r.rec[:cap(r.rec)] + r.rec, err = snappy.Decode(r.rec, r.snappyBuf) + return err + } + return nil + } + + // Only increment i for non-zero records since we use it + // to determine valid content record sequences. + i++ + } +} + +// Err returns the last encountered error wrapped in a corruption error. +// If the reader does not allow to infer a segment index and offset, a total +// offset in the reader stream will be provided. +func (r *Reader) Err() error { + if r.err == nil { + return nil + } + if b, ok := r.rdr.(*segmentBufReader); ok { + return &CorruptionErr{ + Err: r.err, + Dir: b.segs[b.cur].Dir(), + Segment: b.segs[b.cur].Index(), + Offset: int64(b.off), + } + } + return &CorruptionErr{ + Err: r.err, + Segment: -1, + Offset: r.total, + } +} + +// Record returns the current record. The returned byte slice is only +// valid until the next call to Next. 
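+//
+// A typical read loop (sketch; sr and process are placeholders, not part of
+// this package):
+//
+//	r := NewReader(sr)
+//	for r.Next() {
+//		process(r.Record())
+//	}
+//	if err := r.Err(); err != nil {
+//		// inspect the *CorruptionErr; WAL.Repair can truncate at this point
+//	}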
+func (r *Reader) Record() []byte { + return r.rec +} + +// Segment returns the current segment being read. +func (r *Reader) Segment() int { + if b, ok := r.rdr.(*segmentBufReader); ok { + return b.segs[b.cur].Index() + } + return -1 +} + +// Offset returns the current position of the segment being read. +func (r *Reader) Offset() int64 { + if b, ok := r.rdr.(*segmentBufReader); ok { + return int64(b.off) + } + return r.total +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go new file mode 100644 index 00000000000..3bc2894d322 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go @@ -0,0 +1,960 @@ +// Copyright 2017 The Prometheus Authors + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "bufio" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/golang/snappy" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + "github.com/prometheus/prometheus/tsdb/fileutil" +) + +const ( + DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB + pageSize = 32 * 1024 // 32KB + recordHeaderSize = 7 +) + +// The table gets initialized with sync.Once but may still cause a race +// with any other use of the crc32 package anywhere. Thus we initialize it +// before. +var castagnoliTable = crc32.MakeTable(crc32.Castagnoli) + +// page is an in memory buffer used to batch disk writes. +// Records bigger than the page size are split and flushed separately. +// A flush is triggered when a single records doesn't fit the page size or +// when the next record can't fit in the remaining free page space. +type page struct { + alloc int + flushed int + buf [pageSize]byte +} + +func (p *page) remaining() int { + return pageSize - p.alloc +} + +func (p *page) full() bool { + return pageSize-p.alloc < recordHeaderSize +} + +func (p *page) reset() { + for i := range p.buf { + p.buf[i] = 0 + } + p.alloc = 0 + p.flushed = 0 +} + +// SegmentFile represents the underlying file used to store a segment. +type SegmentFile interface { + Stat() (os.FileInfo, error) + Sync() error + io.Writer + io.Reader + io.Closer +} + +// Segment represents a segment file. +type Segment struct { + SegmentFile + dir string + i int +} + +// Index returns the index of the segment. +func (s *Segment) Index() int { + return s.i +} + +// Dir returns the directory of the segment. +func (s *Segment) Dir() string { + return s.dir +} + +// CorruptionErr is an error that's returned when corruption is encountered. 
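+// The segment index and byte offset it carries are what WAL.Repair uses to
+// decide where to truncate and rewrite the log.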
+type CorruptionErr struct { + Dir string + Segment int + Offset int64 + Err error +} + +func (e *CorruptionErr) Error() string { + if e.Segment < 0 { + return fmt.Sprintf("corruption after %d bytes: %s", e.Offset, e.Err) + } + return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err) +} + +// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. +func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { + segName := SegmentName(dir, k) + f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) + if err != nil { + return nil, err + } + stat, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + // If the last page is torn, fill it with zeros. + // In case it was torn after all records were written successfully, this + // will just pad the page and everything will be fine. + // If it was torn mid-record, a full read (which the caller should do anyway + // to ensure integrity) will detect it as a corruption by the end. + if d := stat.Size() % pageSize; d != 0 { + level.Warn(logger).Log("msg", "Last page of the wal is torn, filling it with zeros", "segment", segName) + if _, err := f.Write(make([]byte, pageSize-d)); err != nil { + f.Close() + return nil, errors.Wrap(err, "zero-pad torn page") + } + } + return &Segment{SegmentFile: f, i: k, dir: dir}, nil +} + +// CreateSegment creates a new segment k in dir. +func CreateSegment(dir string, k int) (*Segment, error) { + f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o666) + if err != nil { + return nil, err + } + return &Segment{SegmentFile: f, i: k, dir: dir}, nil +} + +// OpenReadSegment opens the segment with the given filename. +func OpenReadSegment(fn string) (*Segment, error) { + k, err := strconv.Atoi(filepath.Base(fn)) + if err != nil { + return nil, errors.New("not a valid filename") + } + f, err := os.Open(fn) + if err != nil { + return nil, err + } + return &Segment{SegmentFile: f, i: k, dir: filepath.Dir(fn)}, nil +} + +// WAL is a write ahead log that stores records in segment files. +// It must be read from start to end once before logging new data. +// If an error occurs during read, the repair procedure must be called +// before it's safe to do further writes. +// +// Segments are written to in pages of 32KB, with records possibly split +// across page boundaries. +// Records are never split across segments to allow full segments to be +// safely truncated. It also ensures that torn writes never corrupt records +// beyond the most recent segment. +type WAL struct { + dir string + logger log.Logger + segmentSize int + mtx sync.RWMutex + segment *Segment // Active segment. + donePages int // Pages written to the segment. + page *page // Active page. + stopc chan chan struct{} + actorc chan func() + closed bool // To allow calling Close() more than once without blocking. 
+ compress bool + snappyBuf []byte + + metrics *walMetrics +} + +type walMetrics struct { + fsyncDuration prometheus.Summary + pageFlushes prometheus.Counter + pageCompletions prometheus.Counter + truncateFail prometheus.Counter + truncateTotal prometheus.Counter + currentSegment prometheus.Gauge + writesFailed prometheus.Counter +} + +func newWALMetrics(r prometheus.Registerer) *walMetrics { + m := &walMetrics{} + + m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "prometheus_tsdb_wal_fsync_duration_seconds", + Help: "Duration of WAL fsync.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_page_flushes_total", + Help: "Total number of page flushes.", + }) + m.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_completed_pages_total", + Help: "Total number of completed pages.", + }) + m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_truncations_failed_total", + Help: "Total number of WAL truncations that failed.", + }) + m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_truncations_total", + Help: "Total number of WAL truncations attempted.", + }) + m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_wal_segment_current", + Help: "WAL segment index that TSDB is currently writing to.", + }) + m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_writes_failed_total", + Help: "Total number of WAL writes that failed.", + }) + + if r != nil { + r.MustRegister( + m.fsyncDuration, + m.pageFlushes, + m.pageCompletions, + m.truncateFail, + m.truncateTotal, + m.currentSegment, + m.writesFailed, + ) + } + + return m +} + +// New returns a new WAL over the given directory. +func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WAL, error) { + return NewSize(logger, reg, dir, DefaultSegmentSize, compress) +} + +// NewSize returns a new WAL over the given directory. +// New segments are created with the specified size. +func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WAL, error) { + if segmentSize%pageSize != 0 { + return nil, errors.New("invalid segment size") + } + if err := os.MkdirAll(dir, 0o777); err != nil { + return nil, errors.Wrap(err, "create dir") + } + if logger == nil { + logger = log.NewNopLogger() + } + w := &WAL{ + dir: dir, + logger: logger, + segmentSize: segmentSize, + page: &page{}, + actorc: make(chan func(), 100), + stopc: make(chan chan struct{}), + compress: compress, + } + w.metrics = newWALMetrics(reg) + + _, last, err := Segments(w.Dir()) + if err != nil { + return nil, errors.Wrap(err, "get segment range") + } + + // Index of the Segment we want to open and write to. + writeSegmentIndex := 0 + // If some segments already exist create one with a higher index than the last segment. + if last != -1 { + writeSegmentIndex = last + 1 + } + + segment, err := CreateSegment(w.Dir(), writeSegmentIndex) + if err != nil { + return nil, err + } + + if err := w.setSegment(segment); err != nil { + return nil, err + } + + go w.run() + + return w, nil +} + +// Open an existing WAL. 
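+// Unlike New, Open neither creates an active segment nor starts the
+// background fsync goroutine, so the returned WAL is suitable only for
+// read-side use of an existing directory.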
+func Open(logger log.Logger, dir string) (*WAL, error) { + if logger == nil { + logger = log.NewNopLogger() + } + w := &WAL{ + dir: dir, + logger: logger, + } + + return w, nil +} + +// CompressionEnabled returns if compression is enabled on this WAL. +func (w *WAL) CompressionEnabled() bool { + return w.compress +} + +// Dir returns the directory of the WAL. +func (w *WAL) Dir() string { + return w.dir +} + +func (w *WAL) run() { +Loop: + for { + select { + case f := <-w.actorc: + f() + case donec := <-w.stopc: + close(w.actorc) + defer close(donec) + break Loop + } + } + // Drain and process any remaining functions. + for f := range w.actorc { + f() + } +} + +// Repair attempts to repair the WAL based on the error. +// It discards all data after the corruption. +func (w *WAL) Repair(origErr error) error { + // We could probably have a mode that only discards torn records right around + // the corruption to preserve as data much as possible. + // But that's not generally applicable if the records have any kind of causality. + // Maybe as an extra mode in the future if mid-WAL corruptions become + // a frequent concern. + err := errors.Cause(origErr) // So that we can pick up errors even if wrapped. + + cerr, ok := err.(*CorruptionErr) + if !ok { + return errors.Wrap(origErr, "cannot handle error") + } + if cerr.Segment < 0 { + return errors.New("corruption error does not specify position") + } + level.Warn(w.logger).Log("msg", "Starting corruption repair", + "segment", cerr.Segment, "offset", cerr.Offset) + + // All segments behind the corruption can no longer be used. + segs, err := listSegments(w.Dir()) + if err != nil { + return errors.Wrap(err, "list segments") + } + level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) + + for _, s := range segs { + if w.segment.i == s.index { + // The active segment needs to be removed, + // close it first (Windows!). Can be closed safely + // as we set the current segment to repaired file + // below. + if err := w.segment.Close(); err != nil { + return errors.Wrap(err, "close active segment") + } + } + if s.index <= cerr.Segment { + continue + } + if err := os.Remove(filepath.Join(w.Dir(), s.name)); err != nil { + return errors.Wrapf(err, "delete segment:%v", s.index) + } + } + // Regardless of the corruption offset, no record reaches into the previous segment. + // So we can safely repair the WAL by removing the segment and re-inserting all + // its records up to the corruption. + level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment) + + fn := SegmentName(w.Dir(), cerr.Segment) + tmpfn := fn + ".repair" + + if err := fileutil.Rename(fn, tmpfn); err != nil { + return err + } + // Create a clean segment and make it the active one. + s, err := CreateSegment(w.Dir(), cerr.Segment) + if err != nil { + return err + } + if err := w.setSegment(s); err != nil { + return err + } + + f, err := os.Open(tmpfn) + if err != nil { + return errors.Wrap(err, "open segment") + } + defer f.Close() + + r := NewReader(bufio.NewReader(f)) + + for r.Next() { + // Add records only up to the where the error was. + if r.Offset() >= cerr.Offset { + break + } + if err := w.Log(r.Record()); err != nil { + return errors.Wrap(err, "insert record") + } + } + // We expect an error here from r.Err(), so nothing to handle. 
+
+	// We need to pad to the end of the last page in the repaired segment.
+	if err := w.flushPage(true); err != nil {
+		return errors.Wrap(err, "flush page in repair")
+	}
+
+	// We explicitly close even when there is a defer for Windows to be
+	// able to delete it. The defer is in place to close it in case there
+	// are errors above.
+	if err := f.Close(); err != nil {
+		return errors.Wrap(err, "close corrupted file")
+	}
+	if err := os.Remove(tmpfn); err != nil {
+		return errors.Wrap(err, "delete corrupted segment")
+	}
+
+	// Explicitly close the segment we just repaired to avoid issues with Windows.
+	s.Close()
+
+	// We always want to start writing to a new Segment rather than an existing
+	// Segment, which is handled by NewSize, but earlier in Repair we're deleting
+	// all segments that come after the corrupted Segment. Recreate a new Segment here.
+	s, err = CreateSegment(w.Dir(), cerr.Segment+1)
+	if err != nil {
+		return err
+	}
+	return w.setSegment(s)
+}
+
+// SegmentName builds a segment name for the directory.
+func SegmentName(dir string, i int) string {
+	return filepath.Join(dir, fmt.Sprintf("%08d", i))
+}
+
+// NextSegment creates the next segment and closes the previous one.
+func (w *WAL) NextSegment() error {
+	w.mtx.Lock()
+	defer w.mtx.Unlock()
+	return w.nextSegment()
+}
+
+// nextSegment creates the next segment and closes the previous one.
+func (w *WAL) nextSegment() error {
+	if w.closed {
+		return errors.New("wal is closed")
+	}
+
+	// Only flush the current page if it actually holds data.
+	if w.page.alloc > 0 {
+		if err := w.flushPage(true); err != nil {
+			return err
+		}
+	}
+	next, err := CreateSegment(w.Dir(), w.segment.Index()+1)
+	if err != nil {
+		return errors.Wrap(err, "create new segment file")
+	}
+	prev := w.segment
+	if err := w.setSegment(next); err != nil {
+		return err
+	}
+
+	// Don't block further writes by fsyncing the last segment.
+	w.actorc <- func() {
+		if err := w.fsync(prev); err != nil {
+			level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
+		}
+		if err := prev.Close(); err != nil {
+			level.Error(w.logger).Log("msg", "close previous segment", "err", err)
+		}
+	}
+	return nil
+}
+
+func (w *WAL) setSegment(segment *Segment) error {
+	w.segment = segment
+
+	// Correctly initialize donePages.
+	stat, err := segment.Stat()
+	if err != nil {
+		return err
+	}
+	w.donePages = int(stat.Size() / pageSize)
+	w.metrics.currentSegment.Set(float64(segment.Index()))
+	return nil
+}
+
+// flushPage writes the new contents of the page to disk. If no more records will fit into
+// the page, the remaining bytes will be set to zero and a new page will be started.
+// If clear is true, this is enforced regardless of how many bytes are left in the page.
+func (w *WAL) flushPage(clear bool) error {
+	w.metrics.pageFlushes.Inc()
+
+	p := w.page
+	clear = clear || p.full()
+
+	// No more data will fit into the page or an implicit clear.
+	// Enqueue and clear it.
+	if clear {
+		p.alloc = pageSize // Write till end of page.
+	}
+
+	n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
+	if err != nil {
+		p.flushed += n
+		return err
+	}
+	p.flushed += n
+
+	// We flushed an entire page, prepare a new one.
+ if clear { + p.reset() + w.donePages++ + w.metrics.pageCompletions.Inc() + } + return nil +} + +// First Byte of header format: +// [ 4 bits unallocated] [1 bit snappy compression flag] [ 3 bit record type ] +const ( + snappyMask = 1 << 3 + recTypeMask = snappyMask - 1 +) + +type recType uint8 + +const ( + recPageTerm recType = 0 // Rest of page is empty. + recFull recType = 1 // Full record. + recFirst recType = 2 // First fragment of a record. + recMiddle recType = 3 // Middle fragments of a record. + recLast recType = 4 // Final fragment of a record. +) + +func recTypeFromHeader(header byte) recType { + return recType(header & recTypeMask) +} + +func (t recType) String() string { + switch t { + case recPageTerm: + return "zero" + case recFull: + return "full" + case recFirst: + return "first" + case recMiddle: + return "middle" + case recLast: + return "last" + default: + return "" + } +} + +func (w *WAL) pagesPerSegment() int { + return w.segmentSize / pageSize +} + +// Log writes the records into the log. +// Multiple records can be passed at once to reduce writes and increase throughput. +func (w *WAL) Log(recs ...[]byte) error { + w.mtx.Lock() + defer w.mtx.Unlock() + // Callers could just implement their own list record format but adding + // a bit of extra logic here frees them from that overhead. + for i, r := range recs { + if err := w.log(r, i == len(recs)-1); err != nil { + w.metrics.writesFailed.Inc() + return err + } + } + return nil +} + +// log writes rec to the log and forces a flush of the current page if: +// - the final record of a batch +// - the record is bigger than the page size +// - the current page is full. +func (w *WAL) log(rec []byte, final bool) error { + // When the last page flush failed the page will remain full. + // When the page is full, need to flush it before trying to add more records to it. + if w.page.full() { + if err := w.flushPage(true); err != nil { + return err + } + } + + // Compress the record before calculating if a new segment is needed. + compressed := false + if w.compress && + len(rec) > 0 && + // If MaxEncodedLen is less than 0 the record is too large to be compressed. + snappy.MaxEncodedLen(len(rec)) >= 0 { + // The snappy library uses `len` to calculate if we need a new buffer. + // In order to allocate as few buffers as possible make the length + // equal to the capacity. + w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)] + w.snappyBuf = snappy.Encode(w.snappyBuf, rec) + if len(w.snappyBuf) < len(rec) { + rec = w.snappyBuf + compressed = true + } + } + + // If the record is too big to fit within the active page in the current + // segment, terminate the active segment and advance to the next one. + // This ensures that records do not cross segment boundaries. + left := w.page.remaining() - recordHeaderSize // Free space in the active page. + left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment. + + if len(rec) > left { + if err := w.nextSegment(); err != nil { + return err + } + } + + // Populate as many pages as necessary to fit the record. + // Be careful to always do one pass to ensure we write zero-length records. + for i := 0; i == 0 || len(rec) > 0; i++ { + p := w.page + + // Find how much of the record we can fit into the page. 
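+		// For example, a 100 KiB record written to a fresh page fragments into
+		// recFirst + recMiddle + recMiddle + recLast, since each 32 KiB page
+		// holds at most pageSize-recordHeaderSize = 32761 payload bytes.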
+ var ( + l = min(len(rec), (pageSize-p.alloc)-recordHeaderSize) + part = rec[:l] + buf = p.buf[p.alloc:] + typ recType + ) + + switch { + case i == 0 && len(part) == len(rec): + typ = recFull + case len(part) == len(rec): + typ = recLast + case i == 0: + typ = recFirst + default: + typ = recMiddle + } + if compressed { + typ |= snappyMask + } + + buf[0] = byte(typ) + crc := crc32.Checksum(part, castagnoliTable) + binary.BigEndian.PutUint16(buf[1:], uint16(len(part))) + binary.BigEndian.PutUint32(buf[3:], crc) + + copy(buf[recordHeaderSize:], part) + p.alloc += len(part) + recordHeaderSize + + if w.page.full() { + if err := w.flushPage(true); err != nil { + // TODO When the flushing fails at this point and the record has not been + // fully written to the buffer, we end up with a corrupted WAL because some part of the + // record have been written to the buffer, while the rest of the record will be discarded. + return err + } + } + rec = rec[l:] + } + + // If it's the final record of the batch and the page is not empty, flush it. + if final && w.page.alloc > 0 { + if err := w.flushPage(false); err != nil { + return err + } + } + + return nil +} + +// LastSegmentAndOffset returns the last segment number of the WAL +// and the offset in that file upto which the segment has been filled. +func (w *WAL) LastSegmentAndOffset() (seg, offset int, err error) { + w.mtx.Lock() + defer w.mtx.Unlock() + + _, seg, err = Segments(w.Dir()) + if err != nil { + return + } + + offset = (w.donePages * pageSize) + w.page.alloc + + return +} + +// Truncate drops all segments before i. +func (w *WAL) Truncate(i int) (err error) { + w.metrics.truncateTotal.Inc() + defer func() { + if err != nil { + w.metrics.truncateFail.Inc() + } + }() + refs, err := listSegments(w.Dir()) + if err != nil { + return err + } + for _, r := range refs { + if r.index >= i { + break + } + if err = os.Remove(filepath.Join(w.Dir(), r.name)); err != nil { + return err + } + } + return nil +} + +func (w *WAL) fsync(f *Segment) error { + start := time.Now() + err := f.Sync() + w.metrics.fsyncDuration.Observe(time.Since(start).Seconds()) + return err +} + +// Close flushes all writes and closes active segment. +func (w *WAL) Close() (err error) { + w.mtx.Lock() + defer w.mtx.Unlock() + + if w.closed { + return errors.New("wal already closed") + } + + if w.segment == nil { + w.closed = true + return nil + } + + // Flush the last page and zero out all its remaining size. + // We must not flush an empty page as it would falsely signal + // the segment is done if we start writing to it again after opening. + if w.page.alloc > 0 { + if err := w.flushPage(true); err != nil { + return err + } + } + + donec := make(chan struct{}) + w.stopc <- donec + <-donec + + if err = w.fsync(w.segment); err != nil { + level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + } + if err := w.segment.Close(); err != nil { + level.Error(w.logger).Log("msg", "close previous segment", "err", err) + } + w.closed = true + return nil +} + +// Segments returns the range [first, n] of currently existing segments. +// If no segments are found, first and n are -1. 
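+// Segment files are named as zero-padded integers (see SegmentName), so the
+// range is derived from the sorted directory listing, which listSegments
+// also verifies is gapless.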
+func Segments(walDir string) (first, last int, err error) { + refs, err := listSegments(walDir) + if err != nil { + return 0, 0, err + } + if len(refs) == 0 { + return -1, -1, nil + } + return refs[0].index, refs[len(refs)-1].index, nil +} + +type segmentRef struct { + name string + index int +} + +func listSegments(dir string) (refs []segmentRef, err error) { + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + for _, f := range files { + fn := f.Name() + k, err := strconv.Atoi(fn) + if err != nil { + continue + } + refs = append(refs, segmentRef{name: fn, index: k}) + } + sort.Slice(refs, func(i, j int) bool { + return refs[i].index < refs[j].index + }) + for i := 0; i < len(refs)-1; i++ { + if refs[i].index+1 != refs[i+1].index { + return nil, errors.New("segments are not sequential") + } + } + return refs, nil +} + +// SegmentRange groups segments by the directory and the first and last index it includes. +type SegmentRange struct { + Dir string + First, Last int +} + +// NewSegmentsReader returns a new reader over all segments in the directory. +func NewSegmentsReader(dir string) (io.ReadCloser, error) { + return NewSegmentsRangeReader(SegmentRange{dir, -1, -1}) +} + +// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges. +// If first or last are -1, the range is open on the respective end. +func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { + var segs []*Segment + + for _, sgmRange := range sr { + refs, err := listSegments(sgmRange.Dir) + if err != nil { + return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir) + } + + for _, r := range refs { + if sgmRange.First >= 0 && r.index < sgmRange.First { + continue + } + if sgmRange.Last >= 0 && r.index > sgmRange.Last { + break + } + s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name)) + if err != nil { + return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir) + } + segs = append(segs, s) + } + } + return NewSegmentBufReader(segs...), nil +} + +// segmentBufReader is a buffered reader that reads in multiples of pages. +// The main purpose is that we are able to track segment and offset for +// corruption reporting. We have to be careful not to increment curr too +// early, as it is used by Reader.Err() to tell Repair which segment is corrupt. +// As such we pad the end of non-page align segments with zeros. +type segmentBufReader struct { + buf *bufio.Reader + segs []*Segment + cur int // Index into segs. + off int // Offset of read data into current segment. +} + +// nolint:revive // TODO: Consider exporting segmentBufReader +func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { + if len(segs) == 0 { + return &segmentBufReader{} + } + + return &segmentBufReader{ + buf: bufio.NewReaderSize(segs[0], 16*pageSize), + segs: segs, + } +} + +// nolint:revive +func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) { + if offset == 0 || len(segs) == 0 { + return NewSegmentBufReader(segs...), nil + } + + sbr = &segmentBufReader{ + buf: bufio.NewReaderSize(segs[0], 16*pageSize), + segs: segs, + } + if offset > 0 { + _, err = sbr.buf.Discard(offset) + } + return sbr, err +} + +func (r *segmentBufReader) Close() (err error) { + for _, s := range r.segs { + if e := s.Close(); e != nil { + err = e + } + } + return err +} + +// Read implements io.Reader. 
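+// Short final pages are zero-padded before advancing to the next segment, so
+// the cur/off bookkeeping (and thus corruption positions reported through
+// Reader.Err) stays accurate.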
+
+// Read implements io.Reader.
+func (r *segmentBufReader) Read(b []byte) (n int, err error) {
+	if len(r.segs) == 0 {
+		return 0, io.EOF
+	}
+
+	n, err = r.buf.Read(b)
+	r.off += n
+
+	// If we succeeded, or hit a non-EOF, we can stop.
+	if err == nil || err != io.EOF {
+		return n, err
+	}
+
+	// We hit EOF; fake out zero padding at the end of short segments, so we
+	// don't increment curr too early and report the wrong segment as corrupt.
+	if r.off%pageSize != 0 {
+		i := 0
+		for ; n+i < len(b) && (r.off+i)%pageSize != 0; i++ {
+			b[n+i] = 0
+		}
+
+		// Return early, even if we didn't fill b.
+		r.off += i
+		return n + i, nil
+	}
+
+	// There is no more data left in the current segment and there are no more
+	// segments left. Return EOF.
+	if r.cur+1 >= len(r.segs) {
+		return n, io.EOF
+	}
+
+	// Move to next segment.
+	r.cur++
+	r.off = 0
+	r.buf.Reset(r.segs[r.cur])
+	return n, nil
+}
+
+// Size computes the size of the WAL.
+// We do this by adding the sizes of all the files under the WAL dir.
+func (w *WAL) Size() (int64, error) {
+	return fileutil.DirSize(w.Dir())
+}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go
new file mode 100644
index 00000000000..3c47b58d057
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go
@@ -0,0 +1,671 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-kit/log/level"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/tsdb/record"
+)
+
+const (
+	readPeriod         = 10 * time.Millisecond
+	checkpointPeriod   = 5 * time.Second
+	segmentCheckPeriod = 100 * time.Millisecond
+	consumer           = "consumer"
+)
+
+// WriteTo is an interface used by the Watcher to send the samples it's read
+// from the WAL on to somewhere else. Functions will be called concurrently
+// and it is left to the implementer to make sure they are safe.
+type WriteTo interface {
+	Append([]record.RefSample) bool
+	AppendExemplars([]record.RefExemplar) bool
+	StoreSeries([]record.RefSeries, int)
+	// Next two methods are intended for garbage-collection: first we call
+	// UpdateSeriesSegment on all current series
+	UpdateSeriesSegment([]record.RefSeries, int)
+	// Then SeriesReset is called to allow the deletion
+	// of all series created in a segment lower than the argument.
+	SeriesReset(int)
+}
+
+type WatcherMetrics struct {
+	recordsRead           *prometheus.CounterVec
+	recordDecodeFails     *prometheus.CounterVec
+	samplesSentPreTailing *prometheus.CounterVec
+	currentSegment        *prometheus.GaugeVec
+}
+
+// Watcher watches the TSDB WAL for a given WriteTo.
+type Watcher struct {
+	name           string
+	writer         WriteTo
+	logger         log.Logger
+	walDir         string
+	lastCheckpoint string
+	sendExemplars  bool
+	metrics        *WatcherMetrics
+	readerMetrics  *LiveReaderMetrics
+
+	startTime      time.Time
+	startTimestamp int64 // the start time as a Prometheus timestamp
+	sendSamples    bool
+
+	recordsReadMetric       *prometheus.CounterVec
+	recordDecodeFailsMetric prometheus.Counter
+	samplesSentPreTailing   prometheus.Counter
+	currentSegmentMetric    prometheus.Gauge
+
+	quit chan struct{}
+	done chan struct{}
+
+	// For testing, stop when we hit this segment.
+	MaxSegment int
+}
+
+func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
+	m := &WatcherMetrics{
+		recordsRead: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "records_read_total",
+				Help:      "Number of records read by the WAL watcher from the WAL.",
+			},
+			[]string{consumer, "type"},
+		),
+		recordDecodeFails: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "record_decode_failures_total",
+				Help:      "Number of records read by the WAL watcher that resulted in an error when decoding.",
+			},
+			[]string{consumer},
+		),
+		samplesSentPreTailing: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "samples_sent_pre_tailing_total",
+				Help:      "Number of sample records read by the WAL watcher and sent to remote write during replay of existing WAL.",
+			},
+			[]string{consumer},
+		),
+		currentSegment: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: "prometheus",
+				Subsystem: "wal_watcher",
+				Name:      "current_segment",
+				Help:      "Current segment the WAL watcher is reading records from.",
+			},
+			[]string{consumer},
+		),
+	}
+
+	if reg != nil {
+		reg.MustRegister(m.recordsRead)
+		reg.MustRegister(m.recordDecodeFails)
+		reg.MustRegister(m.samplesSentPreTailing)
+		reg.MustRegister(m.currentSegment)
+	}
+
+	return m
+}
+
+// NewWatcher creates a new WAL watcher for a given WriteTo.
+func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string, sendExemplars bool) *Watcher {
+	if logger == nil {
+		logger = log.NewNopLogger()
+	}
+	return &Watcher{
+		logger:        logger,
+		writer:        writer,
+		metrics:       metrics,
+		readerMetrics: readerMetrics,
+		walDir:        path.Join(walDir, "wal"),
+		name:          name,
+		sendExemplars: sendExemplars,
+
+		quit: make(chan struct{}),
+		done: make(chan struct{}),
+
+		MaxSegment: -1,
+	}
+}
+
+func (w *Watcher) setMetrics() {
+	// Setup the WAL Watcher's metrics. We do this here rather than in the
+	// constructor because of the ordering of creating Queue Managers,
+	// stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig.
+	if w.metrics != nil {
+		w.recordsReadMetric = w.metrics.recordsRead.MustCurryWith(prometheus.Labels{consumer: w.name})
+		w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name)
+		w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
+		w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
+	}
+}
+
+// Start the Watcher.
+func (w *Watcher) Start() {
+	w.setMetrics()
+	level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name)
+
+	go w.loop()
+}
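To make the Watcher wiring concrete, here is a hypothetical WriteTo implementation that only counts samples, hooked up via NewWatcher. NewLiveReaderMetrics lives in live_reader.go of this package (not shown in this diff), so its use here is an assumption; also note that NewWatcher itself appends "wal" to the directory it is given.

package main

import (
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/wal"
)

// countingWriter is a stand-in WriteTo that just counts samples.
type countingWriter struct{ samples int }

func (c *countingWriter) Append(s []record.RefSample) bool            { c.samples += len(s); return true }
func (c *countingWriter) AppendExemplars([]record.RefExemplar) bool   { return true }
func (c *countingWriter) StoreSeries([]record.RefSeries, int)         {}
func (c *countingWriter) UpdateSeriesSegment([]record.RefSeries, int) {}
func (c *countingWriter) SeriesReset(int)                             {}

func main() {
	reg := prometheus.NewRegistry()
	watcher := wal.NewWatcher(
		wal.NewWatcherMetrics(reg),
		wal.NewLiveReaderMetrics(reg), // from live_reader.go, not in this diff
		log.NewNopLogger(),
		"demo", // consumer name, used as the metric label
		&countingWriter{},
		"/tmp/prometheus", // the watcher tails /tmp/prometheus/wal
		false,             // sendExemplars
	)
	watcher.Start()
	time.Sleep(30 * time.Second)
	watcher.Stop()
}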
+
+// Stop the Watcher.
+func (w *Watcher) Stop() {
+	close(w.quit)
+	<-w.done
+
+	// Records read metric has series and samples.
+	if w.metrics != nil {
+		w.metrics.recordsRead.DeleteLabelValues(w.name, "series")
+		w.metrics.recordsRead.DeleteLabelValues(w.name, "samples")
+		w.metrics.recordDecodeFails.DeleteLabelValues(w.name)
+		w.metrics.samplesSentPreTailing.DeleteLabelValues(w.name)
+		w.metrics.currentSegment.DeleteLabelValues(w.name)
+	}
+
+	level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name)
+}
+
+func (w *Watcher) loop() {
+	defer close(w.done)
+
+	// We may encounter failures processing the WAL; we should wait and retry.
+	for !isClosed(w.quit) {
+		w.SetStartTime(time.Now())
+		if err := w.Run(); err != nil {
+			level.Error(w.logger).Log("msg", "error tailing WAL", "err", err)
+		}
+
+		select {
+		case <-w.quit:
+			return
+		case <-time.After(5 * time.Second):
+		}
+	}
+}
+
+// Run the watcher, which will tail the WAL until the quit channel is closed
+// or an error case is hit.
+func (w *Watcher) Run() error {
+	_, lastSegment, err := w.firstAndLast()
+	if err != nil {
+		return errors.Wrap(err, "wal.Segments")
+	}
+
+	// We want to ensure this is false across iterations since
+	// Run will be called again if there was a failure to read the WAL.
+	w.sendSamples = false
+
+	level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name)
+
+	// Backfill from the checkpoint first if it exists.
+	lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir)
+	if err != nil && err != record.ErrNotFound {
+		return errors.Wrap(err, "tsdb.LastCheckpoint")
+	}
+
+	if err == nil {
+		if err = w.readCheckpoint(lastCheckpoint, (*Watcher).readSegment); err != nil {
+			return errors.Wrap(err, "readCheckpoint")
+		}
+	}
+	w.lastCheckpoint = lastCheckpoint
+
+	currentSegment, err := w.findSegmentForIndex(checkpointIndex)
+	if err != nil {
+		return err
+	}
+
+	level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
+	for !isClosed(w.quit) {
+		w.currentSegmentMetric.Set(float64(currentSegment))
+		level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
+
+		// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
+		// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
+		if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil {
+			return err
+		}
+
+		// For testing: stop when you hit a specific segment.
+		if currentSegment == w.MaxSegment {
+			return nil
+		}
+
+		currentSegment++
+	}
+
+	return nil
+}
+
+// findSegmentForIndex finds the first segment greater than or equal to index.
+func (w *Watcher) findSegmentForIndex(index int) (int, error) {
+	refs, err := w.segments(w.walDir)
+	if err != nil {
+		return -1, err
+	}
+
+	for _, r := range refs {
+		if r >= index {
+			return r, nil
+		}
+	}
+
+	return -1, errors.New("failed to find segment for index")
+}
+
+func (w *Watcher) firstAndLast() (int, int, error) {
+	refs, err := w.segments(w.walDir)
+	if err != nil {
+		return -1, -1, err
+	}
+
+	if len(refs) == 0 {
+		return -1, -1, nil
+	}
+	return refs[0], refs[len(refs)-1], nil
+}
+
+// Copied from tsdb/wal/wal.go so we do not have to open a WAL.
+// Plan is to move WAL watcher to TSDB and dedupe these implementations.
+func (w *Watcher) segments(dir string) ([]int, error) {
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	var refs []int
+	for _, f := range files {
+		k, err := strconv.Atoi(f.Name())
+		if err != nil {
+			continue
+		}
+		refs = append(refs, k)
+	}
+	sort.Ints(refs)
+	for i := 0; i < len(refs)-1; i++ {
+		if refs[i]+1 != refs[i+1] {
+			return nil, errors.New("segments are not sequential")
+		}
+	}
+	return refs, nil
+}
+
+// Use tail true to indicate that the reader is currently on a segment that is
+// actively being written to. If false, assume it's a full segment and we're
+// replaying it on start to cache the series records.
+func (w *Watcher) watch(segmentNum int, tail bool) error {
+	segment, err := OpenReadSegment(SegmentName(w.walDir, segmentNum))
+	if err != nil {
+		return err
+	}
+	defer segment.Close()
+
+	reader := NewLiveReader(w.logger, w.readerMetrics, segment)
+
+	readTicker := time.NewTicker(readPeriod)
+	defer readTicker.Stop()
+
+	checkpointTicker := time.NewTicker(checkpointPeriod)
+	defer checkpointTicker.Stop()
+
+	segmentTicker := time.NewTicker(segmentCheckPeriod)
+	defer segmentTicker.Stop()
+
+	// If we're replaying the segment we need to know the size of the file to know
+	// when to return from watch and move on to the next segment.
+	size := int64(math.MaxInt64)
+	if !tail {
+		segmentTicker.Stop()
+		checkpointTicker.Stop()
+		var err error
+		size, err = getSegmentSize(w.walDir, segmentNum)
+		if err != nil {
+			return errors.Wrap(err, "getSegmentSize")
+		}
+	}
+
+	gcSem := make(chan struct{}, 1)
+	for {
+		select {
+		case <-w.quit:
+			return nil
+
+		case <-checkpointTicker.C:
+			// Periodically check if there is a new checkpoint so we can garbage
+			// collect labels. As this is considered an optimisation, we ignore
+			// errors during checkpoint processing. Doing the process asynchronously
+			// allows the current WAL segment to be processed while reading the
+			// checkpoint.
+			select {
+			case gcSem <- struct{}{}:
+				go func() {
+					defer func() {
+						<-gcSem
+					}()
+					if err := w.garbageCollectSeries(segmentNum); err != nil {
+						level.Warn(w.logger).Log("msg", "Error processing checkpoint", "err", err)
+					}
+				}()
+			default:
+				// Currently doing a garbage collect, try again later.
+			}
+
+		case <-segmentTicker.C:
+			_, last, err := w.firstAndLast()
+			if err != nil {
+				return errors.Wrap(err, "segments")
+			}
+
+			// Check if new segments exist.
+			if last <= segmentNum {
+				continue
+			}
+
+			err = w.readSegment(reader, segmentNum, tail)
+
+			// Ignore errors reading to end of segment whilst replaying the WAL.
+			if !tail {
+				if err != nil && errors.Cause(err) != io.EOF {
+					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
+				} else if reader.Offset() != size {
+					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
+				}
+				return nil
+			}
+
+			// Otherwise, when we are tailing, non-EOFs are fatal.
+			if errors.Cause(err) != io.EOF {
+				return err
+			}
+
+			return nil
+
+		case <-readTicker.C:
+			err = w.readSegment(reader, segmentNum, tail)
+
+			// Ignore all errors reading to end of segment whilst replaying the WAL.
+			if !tail {
+				if err != nil && errors.Cause(err) != io.EOF {
+					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
+				} else if reader.Offset() != size {
+					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
+				}
+				return nil
+			}
+
+			// Otherwise, when we are tailing, non-EOFs are fatal.
+			if errors.Cause(err) != io.EOF {
+				return err
+			}
+		}
+	}
+}
+
+func (w *Watcher) garbageCollectSeries(segmentNum int) error {
+	dir, _, err := LastCheckpoint(w.walDir)
+	if err != nil && err != record.ErrNotFound {
+		return errors.Wrap(err, "tsdb.LastCheckpoint")
+	}
+
+	if dir == "" || dir == w.lastCheckpoint {
+		return nil
+	}
+	w.lastCheckpoint = dir
+
+	index, err := checkpointNum(dir)
+	if err != nil {
+		return errors.Wrap(err, "error parsing checkpoint filename")
+	}
+
+	if index >= segmentNum {
+		level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
+		return nil
+	}
+
+	level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum)
+
+	if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil {
+		return errors.Wrap(err, "readCheckpoint")
+	}
+
+	// Clear series with a checkpoint or segment index # lower than the checkpoint we just read.
+	w.writer.SeriesReset(index)
+	return nil
+}
+
+// Read from a segment and pass the details to w.writer.
+// Also used with readCheckpoint - implements segmentReadFn.
+func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
+	var (
+		dec       record.Decoder
+		series    []record.RefSeries
+		samples   []record.RefSample
+		send      []record.RefSample
+		exemplars []record.RefExemplar
+	)
+	for r.Next() && !isClosed(w.quit) {
+		rec := r.Record()
+		w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc()
+
+		switch dec.Type(rec) {
+		case record.Series:
+			series, err := dec.Series(rec, series[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			w.writer.StoreSeries(series, segmentNum)
+
+		case record.Samples:
+			// If we're not tailing a segment we can ignore any samples records we see.
+			// This speeds up replay of the WAL by > 10x.
+			if !tail {
+				break
+			}
+			samples, err := dec.Samples(rec, samples[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			for _, s := range samples {
+				if s.T > w.startTimestamp {
+					if !w.sendSamples {
+						w.sendSamples = true
+						duration := time.Since(w.startTime)
+						level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
+					}
+					send = append(send, s)
+				}
+			}
+			if len(send) > 0 {
+				// Blocks until the sample is sent to all remote write endpoints or closed (because enqueue blocks).
+				w.writer.Append(send)
+				send = send[:0]
+			}
+
+		case record.Exemplars:
+			// Skip if experimental "exemplars over remote write" is not enabled.
+			if !w.sendExemplars {
+				break
+			}
+			// If we're not tailing a segment we can ignore any exemplars records we see.
+			// This speeds up replay of the WAL significantly.
+			if !tail {
+				break
+			}
+			exemplars, err := dec.Exemplars(rec, exemplars[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			w.writer.AppendExemplars(exemplars)
+
+		case record.Tombstones:
+
+		default:
+			// Could be corruption, or reading from a WAL from a newer Prometheus.
+			w.recordDecodeFailsMetric.Inc()
+		}
+	}
+	return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err())
+}
+
+// Go through all series in a segment updating the segmentNum, so we can delete older series.
+// Used with readCheckpoint - implements segmentReadFn.
+func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error {
+	var (
+		dec    record.Decoder
+		series []record.RefSeries
+	)
+	for r.Next() && !isClosed(w.quit) {
+		rec := r.Record()
+		w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc()
+
+		switch dec.Type(rec) {
+		case record.Series:
+			series, err := dec.Series(rec, series[:0])
+			if err != nil {
+				w.recordDecodeFailsMetric.Inc()
+				return err
+			}
+			w.writer.UpdateSeriesSegment(series, segmentNum)
+
+		// Ignore these; we're only interested in series.
+		case record.Samples:
+		case record.Exemplars:
+		case record.Tombstones:
+
+		default:
+			// Could be corruption, or reading from a WAL from a newer Prometheus.
+			w.recordDecodeFailsMetric.Inc()
+		}
+	}
+	return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err())
+}
+
+func (w *Watcher) SetStartTime(t time.Time) {
+	w.startTime = t
+	w.startTimestamp = timestamp.FromTime(t)
+}
+
+func recordType(rt record.Type) string {
+	switch rt {
+	case record.Series:
+		return "series"
+	case record.Samples:
+		return "samples"
+	case record.Tombstones:
+		return "tombstones"
+	default:
+		return "unknown"
+	}
+}
+
+type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) error
+
+// Read all the series records from a Checkpoint directory.
+func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error {
+	level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir)
+	index, err := checkpointNum(checkpointDir)
+	if err != nil {
+		return errors.Wrap(err, "checkpointNum")
+	}
+
+	// Ensure we read the whole contents of every segment in the checkpoint dir.
+	segs, err := w.segments(checkpointDir)
+	if err != nil {
+		return errors.Wrap(err, "Unable to get segments checkpoint dir")
+	}
+	for _, seg := range segs {
+		size, err := getSegmentSize(checkpointDir, seg)
+		if err != nil {
+			return errors.Wrap(err, "getSegmentSize")
+		}
+
+		sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
+		if err != nil {
+			return errors.Wrap(err, "unable to open segment")
+		}
+		defer sr.Close()
+
+		r := NewLiveReader(w.logger, w.readerMetrics, sr)
+		if err := readFn(w, r, index, false); errors.Cause(err) != io.EOF && err != nil {
+			return errors.Wrap(err, "readSegment")
+		}
+
+		if r.Offset() != size {
+			return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
+		}
+	}
+
+	level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir)
+	return nil
+}
+
+func checkpointNum(dir string) (int, error) {
+	// Checkpoint dir names are in the format checkpoint.000001
+	// dir may contain a hidden directory, so only check the base directory
+	chunks := strings.Split(path.Base(dir), ".")
+	if len(chunks) != 2 {
+		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+	}
+
+	result, err := strconv.Atoi(chunks[1])
+	if err != nil {
+		return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
+	}
+
+	return result, nil
+}
+
+// Get size of segment.
+func getSegmentSize(dir string, index int) (int64, error) {
+	i := int64(-1)
+	fi, err := os.Stat(SegmentName(dir, index))
+	if err == nil {
+		i = fi.Size()
+	}
+	return i, err
+}
+
+func isClosed(c chan struct{}) bool {
+	select {
+	case <-c:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/prometheus/prometheus/util/gate/gate.go b/vendor/github.com/prometheus/prometheus/util/gate/gate.go
new file mode 100644
index 00000000000..6cb9d583c6c
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/gate/gate.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gate
+
+import "context"
+
+// A Gate controls the maximum number of concurrently running and waiting queries.
+type Gate struct {
+	ch chan struct{}
+}
+
+// New returns a query gate that limits the number of queries
+// being concurrently executed.
+func New(length int) *Gate {
+	return &Gate{
+		ch: make(chan struct{}, length),
+	}
+}
+
+// Start blocks until the gate has a free spot or the context is done.
+func (g *Gate) Start(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case g.ch <- struct{}{}:
+		return nil
+	}
+}
+
+// Done releases a single spot in the gate.
+func (g *Gate) Done() {
+	select {
+	case <-g.ch:
+	default:
+		panic("gate.Done: more operations done than started")
+	}
+}
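The gate is small and self-contained, so a short sketch of its intended use (the query handler and limit are invented): acquire a slot before doing work, release it when done.

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/util/gate"
)

func main() {
	g := gate.New(2) // at most two queries in flight

	runQuery := func(ctx context.Context, name string) error {
		// Start blocks until a slot frees up or ctx is cancelled.
		if err := g.Start(ctx); err != nil {
			return err
		}
		defer g.Done() // release the slot; panics if calls are unbalanced

		fmt.Println("running", name)
		return nil
	}

	_ = runQuery(context.Background(), "q1")
}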
diff --git a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go
new file mode 100644
index 00000000000..d490a6afdf1
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go
@@ -0,0 +1,130 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+	"bytes"
+	"sync"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/go-logfmt/logfmt"
+)
+
+const (
+	garbageCollectEvery = 10 * time.Second
+	expireEntriesAfter  = 1 * time.Minute
+	maxEntries          = 1024
+)
+
+type logfmtEncoder struct {
+	*logfmt.Encoder
+	buf bytes.Buffer
+}
+
+var logfmtEncoderPool = sync.Pool{
+	New: func() interface{} {
+		var enc logfmtEncoder
+		enc.Encoder = logfmt.NewEncoder(&enc.buf)
+		return &enc
+	},
+}
+
+// Deduper implements log.Logger, deduping log lines.
+type Deduper struct {
+	next   log.Logger
+	repeat time.Duration
+	quit   chan struct{}
+	mtx    sync.RWMutex
+	seen   map[string]time.Time
+}
+
+// Dedupe dedupes log lines to next, only repeating every repeat duration.
+func Dedupe(next log.Logger, repeat time.Duration) *Deduper {
+	d := &Deduper{
+		next:   next,
+		repeat: repeat,
+		quit:   make(chan struct{}),
+		seen:   map[string]time.Time{},
+	}
+	go d.run()
+	return d
+}
+
+// Stop the Deduper.
+func (d *Deduper) Stop() {
+	close(d.quit)
+}
+
+func (d *Deduper) run() {
+	ticker := time.NewTicker(garbageCollectEvery)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			d.mtx.Lock()
+			now := time.Now()
+			for line, seen := range d.seen {
+				if now.Sub(seen) > expireEntriesAfter {
+					delete(d.seen, line)
+				}
+			}
+			d.mtx.Unlock()
+		case <-d.quit:
+			return
+		}
+	}
+}
+
+// Log implements log.Logger.
+func (d *Deduper) Log(keyvals ...interface{}) error {
+	line, err := encode(keyvals...)
+	if err != nil {
+		return err
+	}
+
+	d.mtx.RLock()
+	last, ok := d.seen[line]
+	d.mtx.RUnlock()
+
+	if ok && time.Since(last) < d.repeat {
+		return nil
+	}
+
+	d.mtx.Lock()
+	if len(d.seen) < maxEntries {
+		d.seen[line] = time.Now()
+	}
+	d.mtx.Unlock()
+
+	return d.next.Log(keyvals...)
+}
+
+func encode(keyvals ...interface{}) (string, error) {
+	enc := logfmtEncoderPool.Get().(*logfmtEncoder)
+	enc.buf.Reset()
+	defer logfmtEncoderPool.Put(enc)
+
+	if err := enc.EncodeKeyvals(keyvals...); err != nil {
+		return "", err
+	}
+
+	// Add newline to the end of the buffer.
+	if err := enc.EndRecord(); err != nil {
+		return "", err
+	}
+
+	return enc.buf.String(), nil
+}
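Dedupe composes naturally with the rate-limited logger defined in ratelimit.go just below, so one hypothetical wiring for a chatty failure path might be the following; the message and repeat/rate values are invented, and only the first of the five identical lines reaches stderr.

package main

import (
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/util/logging"
	"golang.org/x/time/rate"
)

func main() {
	base := log.NewLogfmtLogger(os.Stderr)

	// Repeat an identical line at most once per minute.
	deduped := logging.Dedupe(base, 1*time.Minute)
	defer deduped.Stop()

	// And allow at most one line per second overall.
	limited := logging.RateLimit(deduped, rate.Limit(1))

	for i := 0; i < 5; i++ {
		limited.Log("msg", "remote write failed", "endpoint", "example")
	}
}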
diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go
new file mode 100644
index 00000000000..6b5751b016c
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go
@@ -0,0 +1,60 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+	"os"
+	"time"
+
+	"github.com/go-kit/log"
+	"github.com/pkg/errors"
+)
+
+var timestampFormat = log.TimestampFormat(
+	func() time.Time { return time.Now().UTC() },
+	"2006-01-02T15:04:05.000Z07:00",
+)
+
+// JSONFileLogger represents a logger that writes JSON to a file.
+type JSONFileLogger struct {
+	logger log.Logger
+	file   *os.File
+}
+
+// NewJSONFileLogger returns a new JSONFileLogger.
+func NewJSONFileLogger(s string) (*JSONFileLogger, error) {
+	if s == "" {
+		return nil, nil
+	}
+
+	f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666)
+	if err != nil {
+		return nil, errors.Wrap(err, "can't create json logger")
+	}
+
+	return &JSONFileLogger{
+		logger: log.With(log.NewJSONLogger(f), "ts", timestampFormat),
+		file:   f,
+	}, nil
+}
+
+// Close closes the underlying file.
+func (l *JSONFileLogger) Close() error {
+	return l.file.Close()
+}
+
+// Log calls the Log function of the underlying logger.
+func (l *JSONFileLogger) Log(i ...interface{}) error {
+	return l.logger.Log(i...)
+}
diff --git a/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go b/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go
new file mode 100644
index 00000000000..32d1e249e68
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logging
+
+import (
+	"github.com/go-kit/log"
+	"golang.org/x/time/rate"
+)
+
+type ratelimiter struct {
+	limiter *rate.Limiter
+	next    log.Logger
+}
+
+// RateLimit rate-limits writes to a logger.
+func RateLimit(next log.Logger, limit rate.Limit) log.Logger {
+	return &ratelimiter{
+		limiter: rate.NewLimiter(limit, int(limit)),
+		next:    next,
+	}
+}
+
+func (r *ratelimiter) Log(keyvals ...interface{}) error {
+	if r.limiter.Allow() {
+		return r.next.Log(keyvals...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/util/osutil/hostname.go b/vendor/github.com/prometheus/prometheus/util/osutil/hostname.go
new file mode 100644
index 00000000000..c44cb391b66
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/util/osutil/hostname.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package osutil
+
+import (
+	"encoding"
+	"net"
+	"os"
+)
+
+// GetFQDN returns a FQDN if it's possible, otherwise falls back to hostname.
+func GetFQDN() (string, error) {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return "", err
+	}
+
+	ips, err := net.LookupIP(hostname)
+	if err != nil {
+		// Return the system hostname if we can't look up the IP address.
+		return hostname, nil
+	}
+
+	lookup := func(ipStr encoding.TextMarshaler) (string, error) {
+		ip, err := ipStr.MarshalText()
+		if err != nil {
+			return "", err
+		}
+		hosts, err := net.LookupAddr(string(ip))
+		if err != nil || len(hosts) == 0 {
+			return "", err
+		}
+		return hosts[0], nil
+	}
+
+	for _, addr := range ips {
+		if ip := addr.To4(); ip != nil {
+			if fqdn, err := lookup(ip); err == nil {
+				return fqdn, nil
+			}
+		}
+
+		if ip := addr.To16(); ip != nil {
+			if fqdn, err := lookup(ip); err == nil {
+				return fqdn, nil
+			}
+		}
+	}
+	return hostname, nil
+}
diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
new file mode 100644
index 00000000000..7a0b9ed1029
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package clientcredentials implements the OAuth2.0 "client credentials" token flow,
+// also known as the "two-legged OAuth 2.0".
+//
+// This should be used when the client is acting on its own behalf or when the client
+// is the resource owner. It may also be used when requesting access to protected
+// resources based on an authorization previously arranged with the authorization
+// server.
+//
+// See https://tools.ietf.org/html/rfc6749#section-4.4
+package clientcredentials // import "golang.org/x/oauth2/clientcredentials"
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+)
+
+// Config describes a 2-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// TokenURL is the resource server's token endpoint
+	// URL. This is a constant specific to each server.
+	TokenURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+
+	// EndpointParams specifies additional parameters for requests to the token endpoint.
+	EndpointParams url.Values
+
+	// AuthStyle optionally specifies how the endpoint wants the
+	// client ID & client secret sent. The zero value means to
+	// auto-detect.
+	AuthStyle oauth2.AuthStyle
+}
+
+// Token uses client credentials to retrieve a token.
+//
+// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable.
+func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
+	return c.TokenSource(ctx).Token()
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary.
+//
+// The provided context optionally controls which HTTP client
+// is returned. See the oauth2.HTTPClient variable.
+//
+// The returned Client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context and the
+// client ID and client secret.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	source := &tokenSource{
+		ctx:  ctx,
+		conf: c,
+	}
+	return oauth2.ReuseTokenSource(nil, source)
+}
+
+type tokenSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+// Token refreshes the token by using a new client credentials request.
+// Tokens received this way do not include a refresh token.
+func (c *tokenSource) Token() (*oauth2.Token, error) {
+	v := url.Values{
+		"grant_type": {"client_credentials"},
+	}
+	if len(c.conf.Scopes) > 0 {
+		v.Set("scope", strings.Join(c.conf.Scopes, " "))
+	}
+	for k, p := range c.conf.EndpointParams {
+		// Allow grant_type to be overridden to allow interoperability with
+		// non-compliant implementations.
+		if _, ok := v[k]; ok && k != "grant_type" {
+			return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k)
+		}
+		v[k] = p
+	}
+
+	tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle))
+	if err != nil {
+		if rErr, ok := err.(*internal.RetrieveError); ok {
+			return nil, (*oauth2.RetrieveError)(rErr)
+		}
+		return nil, err
+	}
+	t := &oauth2.Token{
+		AccessToken:  tk.AccessToken,
+		TokenType:    tk.TokenType,
+		RefreshToken: tk.RefreshToken,
+		Expiry:       tk.Expiry,
+	}
+	return t.WithExtra(tk.Raw), nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9f2f1a2a82a..5c39f2669b7 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -29,6 +29,9 @@ github.com/VividCortex/gohistogram
 # github.com/alecthomas/kong v0.2.11
 ## explicit; go 1.13
 github.com/alecthomas/kong
+# github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a
+## explicit; go 1.15
+github.com/alecthomas/units
 # github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a
 ## explicit
 github.com/alicebob/gopher-json
@@ -52,9 +55,19 @@ github.com/aws/aws-sdk-go/aws/awserr
 github.com/aws/aws-sdk-go/aws/awsutil
 github.com/aws/aws-sdk-go/aws/client
 github.com/aws/aws-sdk-go/aws/client/metadata
+github.com/aws/aws-sdk-go/aws/corehandlers
 github.com/aws/aws-sdk-go/aws/credentials
+github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
+github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
+github.com/aws/aws-sdk-go/aws/credentials/processcreds
+github.com/aws/aws-sdk-go/aws/credentials/ssocreds
+github.com/aws/aws-sdk-go/aws/credentials/stscreds
+github.com/aws/aws-sdk-go/aws/csm
+github.com/aws/aws-sdk-go/aws/defaults
+github.com/aws/aws-sdk-go/aws/ec2metadata
 github.com/aws/aws-sdk-go/aws/endpoints
 github.com/aws/aws-sdk-go/aws/request
+github.com/aws/aws-sdk-go/aws/session
 github.com/aws/aws-sdk-go/aws/signer/v4
 github.com/aws/aws-sdk-go/internal/context
 github.com/aws/aws-sdk-go/internal/ini
@@ -64,6 +77,7 @@ github.com/aws/aws-sdk-go/internal/s3shared/s3err
 github.com/aws/aws-sdk-go/internal/sdkio
 github.com/aws/aws-sdk-go/internal/sdkmath
 github.com/aws/aws-sdk-go/internal/sdkrand
+github.com/aws/aws-sdk-go/internal/sdkuri
 github.com/aws/aws-sdk-go/internal/shareddefaults
 github.com/aws/aws-sdk-go/internal/strings
 github.com/aws/aws-sdk-go/internal/sync/singleflight
@@ -71,12 +85,19 @@ github.com/aws/aws-sdk-go/private/checksum
 github.com/aws/aws-sdk-go/private/protocol
 github.com/aws/aws-sdk-go/private/protocol/eventstream
 github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
+github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
+github.com/aws/aws-sdk-go/private/protocol/jsonrpc
 github.com/aws/aws-sdk-go/private/protocol/query
 github.com/aws/aws-sdk-go/private/protocol/query/queryutil
 github.com/aws/aws-sdk-go/private/protocol/rest
+github.com/aws/aws-sdk-go/private/protocol/restjson
 github.com/aws/aws-sdk-go/private/protocol/restxml
 github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
 github.com/aws/aws-sdk-go/service/s3
+github.com/aws/aws-sdk-go/service/sso
+github.com/aws/aws-sdk-go/service/sso/ssoiface
+github.com/aws/aws-sdk-go/service/sts
+github.com/aws/aws-sdk-go/service/sts/stsiface
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
@@ -519,6 +540,9 @@ github.com/jedib0t/go-pretty/v6/text
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
+# github.com/jpillora/backoff v1.0.0
+## explicit; go 1.13
+github.com/jpillora/backoff
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
@@ -537,6 +561,7 @@ github.com/klauspost/compress/gzip
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/s2
+github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/klauspost/cpuid v1.3.1
@@ -615,6 +640,9 @@ github.com/modern-go/reflect2
 ## explicit; go 1.11
 github.com/mostynb/go-grpc-compression/snappy
 github.com/mostynb/go-grpc-compression/zstd
+# github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
+## explicit
+github.com/mwitkow/go-conntrack
 # github.com/oklog/run v1.1.0
 ## explicit; go 1.13
 github.com/oklog/run
@@ -714,10 +742,14 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint
 github.com/prometheus/client_model/go
 # github.com/prometheus/common v0.32.1
 ## explicit; go 1.13
+github.com/prometheus/common/config
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
 github.com/prometheus/common/model
 github.com/prometheus/common/version
+# github.com/prometheus/common/sigv4 v0.1.0
+## explicit; go 1.15
+github.com/prometheus/common/sigv4
 # github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289
 ## explicit; go 1.13
 github.com/prometheus/node_exporter/https
@@ -728,17 +760,32 @@ github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
 # github.com/prometheus/prometheus v1.8.2-0.20211217191541-41f1a8125e66 => github.com/prometheus/prometheus v1.8.2-0.20211119115433-692a54649ed7
 ## explicit; go 1.14
+github.com/prometheus/prometheus/config
+github.com/prometheus/prometheus/discovery
+github.com/prometheus/prometheus/discovery/targetgroup
 github.com/prometheus/prometheus/model/exemplar
 github.com/prometheus/prometheus/model/labels
+github.com/prometheus/prometheus/model/relabel
+github.com/prometheus/prometheus/model/textparse
+github.com/prometheus/prometheus/model/timestamp
+github.com/prometheus/prometheus/model/value
 github.com/prometheus/prometheus/prompb
+github.com/prometheus/prometheus/scrape
 github.com/prometheus/prometheus/storage
+github.com/prometheus/prometheus/storage/remote
 github.com/prometheus/prometheus/tsdb/chunkenc
 github.com/prometheus/prometheus/tsdb/chunks
 github.com/prometheus/prometheus/tsdb/encoding
 github.com/prometheus/prometheus/tsdb/errors
 github.com/prometheus/prometheus/tsdb/fileutil
 github.com/prometheus/prometheus/tsdb/index
+github.com/prometheus/prometheus/tsdb/record
+github.com/prometheus/prometheus/tsdb/tombstones
 github.com/prometheus/prometheus/tsdb/tsdbutil
+github.com/prometheus/prometheus/tsdb/wal
+github.com/prometheus/prometheus/util/gate
+github.com/prometheus/prometheus/util/logging
+github.com/prometheus/prometheus/util/osutil
 github.com/prometheus/prometheus/util/pool
 github.com/prometheus/prometheus/util/testutil
 # github.com/prometheus/statsd_exporter v0.21.0
@@ -1093,6 +1140,7 @@ golang.org/x/net/trace
 ## explicit; go 1.11
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
+golang.org/x/oauth2/clientcredentials
 golang.org/x/oauth2/google
 golang.org/x/oauth2/google/internal/externalaccount
 golang.org/x/oauth2/internal
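Since vendor/modules.txt now ships golang.org/x/oauth2/clientcredentials (vendored in full earlier in this diff), a closing sketch of the two-legged flow it implements; the token endpoint, credentials, scope, and target URL below are all placeholders. Any HTTP client built this way fetches and refreshes its access token transparently.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	cfg := clientcredentials.Config{
		ClientID:     "my-client",                             // placeholder
		ClientSecret: "my-secret",                             // placeholder
		TokenURL:     "https://auth.example.com/oauth2/token", // placeholder
		Scopes:       []string{"metrics:write"},               // placeholder
	}

	// The returned client injects a bearer token into every request and
	// re-requests one from TokenURL when it expires.
	client := cfg.Client(context.Background())

	resp, err := client.Get("https://api.example.com/v1/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}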