fix(endpoints): scrape endpoints and not services #148

Merged 8 commits on Apr 23, 2021
3 changes: 1 addition & 2 deletions .github/workflows/load_test.yml
@@ -1,9 +1,8 @@
on:
  push:
-   tags:
-     - v*
    branches:
      - main
+ pull_request:

name: Load Tests
jobs:
2 changes: 0 additions & 2 deletions .github/workflows/push_pr.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,6 @@ name: Push/PR pipeline

on:
push:
tags:
- v*
branches:
- main
pull_request:
9 changes: 5 additions & 4 deletions cmd/k8s-target-retriever/main.go
@@ -33,7 +33,7 @@ func main() {
}

kubeconf := endpoints.WithKubeConfig(*kubeConfigFile)
ktr, err := endpoints.NewKubernetesTargetRetriever("prometheus.io/scrape", false, kubeconf)
ktr, err := endpoints.NewKubernetesTargetRetriever("prometheus.io/scrape", false, true, true, kubeconf)
if err != nil {
logrus.Fatalf("could not create KubernetesTargetRetriever: %v", err)
}
@@ -44,15 +44,16 @@ func main() {

logrus.Infoln("connected to cluster, watching for targets")

-for range time.Tick(time.Second * 5) {
+for range time.Tick(time.Second * 7) {
targets, err := ktr.GetTargets()
logrus.Infof("###################################")

if err != nil {
logrus.Fatalf("could not get targets: %v", err)
}
for _, b := range targets {
logrus.Infof("%s[%s] %s", b.Name, b.Object.Kind, b.URL.String())
}

logrus.Println()
logrus.Infof("###################################")
}
}
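Taken together, the two call sites in this PR imply that NewKubernetesTargetRetriever now accepts two extra booleans. A minimal sketch of the assumed signature, following the functional-options style suggested by WithKubeConfig and WithInClusterConfig (the struct fields and Option type are illustrative, not the actual internal/pkg/endpoints code):

package endpoints

// Option mutates the retriever during construction; this is the shape
// implied by the WithKubeConfig and WithInClusterConfig call sites.
type Option func(*KubernetesTargetRetriever) error

// KubernetesTargetRetriever is trimmed here to the fields this PR touches.
type KubernetesTargetRetriever struct {
	scrapeEnabledLabel                string
	requireScrapeEnabledLabelForNodes bool
	scrapeServices                    bool // scrape the service itself
	scrapeEndpoints                   bool // scrape each endpoint behind the service
}

// NewKubernetesTargetRetriever mirrors the argument order seen above:
// label, node-label requirement, the two new toggles, then options.
func NewKubernetesTargetRetriever(scrapeEnabledLabel string, requireScrapeEnabledLabelForNodes, scrapeServices, scrapeEndpoints bool, options ...Option) (*KubernetesTargetRetriever, error) {
	ktr := &KubernetesTargetRetriever{
		scrapeEnabledLabel:                scrapeEnabledLabel,
		requireScrapeEnabledLabelForNodes: requireScrapeEnabledLabelForNodes,
		scrapeServices:                    scrapeServices,
		scrapeEndpoints:                   scrapeEndpoints,
	}
	for _, opt := range options {
		if err := opt(ktr); err != nil {
			return nil, err
		}
	}
	return ktr, nil
}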
2 changes: 2 additions & 0 deletions cmd/nri-prometheus/config.go
@@ -109,6 +109,8 @@ func setViperDefaults(viper *viper.Viper) {
viper.SetDefault("insecure_skip_verify", false)
viper.SetDefault("standalone", true)
viper.SetDefault("disable_autodiscovery", false)
viper.SetDefault("scrape_services", true)
viper.SetDefault("scrape_endpoints", false)
viper.SetDefault("percentiles", []float64{50.0, 95.0, 99.0})
viper.SetDefault("worker_threads", 4)
}
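These defaults flow into the scraper configuration through viper's Unmarshal and the mapstructure tags added to the Config struct later in this diff. A runnable sketch of that mechanism, trimmed to the two new keys (the config type here is a stand-in, not the real scraper.Config):

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// config mirrors only the two new fields of the scraper Config.
type config struct {
	ScrapeServices  bool `mapstructure:"scrape_services"`
	ScrapeEndpoints bool `mapstructure:"scrape_endpoints"`
}

func main() {
	v := viper.New()
	// Same defaults as setViperDefaults above: service scraping stays on
	// for backwards compatibility, endpoint scraping is opt-in.
	v.SetDefault("scrape_services", true)
	v.SetDefault("scrape_endpoints", false)

	var c config
	if err := v.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Printf("scrape_services=%v scrape_endpoints=%v\n", c.ScrapeServices, c.ScrapeEndpoints)
}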
9 changes: 8 additions & 1 deletion deploy/local.yaml.example
@@ -18,6 +18,7 @@ rules:
    - "nodes/proxy"
    - "pods"
    - "services"
+   - "endpoints"
  verbs: ["get", "list", "watch"]
- nonResourceURLs:
    - /metrics
@@ -86,7 +87,13 @@ data:
    # standalone: true
    # How often the integration should run. Defaults to 30s.
    # scrape_duration: "30s"
-   # The HTTP client timeout when fetching data from endpoints. Defaults to 30s.
+   # The HTTP client timeout when fetching data from targets. Defaults to 30s.
+   # scrape_services allows scraping the service itself rather than the endpoints behind it.
+   # Once endpoints are scraped, this is no longer needed.
+   scrape_services: true
+   # scrape_endpoints allows scraping the endpoints behind a service directly, as Prometheus natively does.
+   # Note that depending on the number of endpoints behind a service, the load can increase considerably.
+   scrape_endpoints: false
    # scrape_timeout: "30s"
    # Whether the integration should run in verbose mode or not. Defaults to false.
    verbose: false
13 changes: 11 additions & 2 deletions deploy/nri-prometheus.tmpl.yaml
@@ -18,6 +18,7 @@ rules:
    - "nodes/proxy"
    - "pods"
    - "services"
+   - "endpoints"
  verbs: ["get", "list", "watch"]
- nonResourceURLs:
    - /metrics
@@ -89,7 +90,7 @@ data:
    # How often the integration should run. Defaults to 30s.
    # scrape_duration: "30s"

-   # The HTTP client timeout when fetching data from endpoints. Defaults to 5s.
+   # The HTTP client timeout when fetching data from targets. Defaults to 5s.
    # scrape_timeout: "5s"

    # How old must the entries used for calculating the counters delta be
@@ -114,11 +115,19 @@ data:
    # The label used to identify scrapable targets. Defaults to "prometheus.io/scrape".
    scrape_enabled_label: "prometheus.io/scrape"

+   # scrape_services allows scraping the service itself rather than the endpoints behind it.
+   # Once endpoints are scraped, this is no longer needed.
+   scrape_services: true
+
+   # scrape_endpoints allows scraping the endpoints behind a service directly, as Prometheus natively does.
+   # Note that depending on the number of endpoints behind a service, the load can increase considerably.
+   scrape_endpoints: false
+
    # Whether k8s nodes need to be labelled to be scraped or not. Defaults to true.
    require_scrape_enabled_label_for_nodes: true

    # Number of worker threads used for scraping targets.
-   # For large clusters with many (>400) endpoints, slowly increase until scrape
+   # For large clusters with many (>400) targets, slowly increase until the scrape
    # time falls below the desired `scrape_duration`.
    # Increasing this value too much will result in huge memory consumption if too
    # many metrics are being scraped.
4 changes: 3 additions & 1 deletion internal/cmd/scraper/scraper.go
@@ -34,6 +34,8 @@ type Config struct {
ScrapeTimeout time.Duration `mapstructure:"scrape_timeout"`
Standalone bool `mapstructure:"standalone"`
DisableAutodiscovery bool `mapstructure:"disable_autodiscovery"`
+ScrapeServices bool `mapstructure:"scrape_services"`
+ScrapeEndpoints bool `mapstructure:"scrape_endpoints"`
ScrapeDuration string `mapstructure:"scrape_duration"`
EmitterHarvestPeriod string `mapstructure:"emitter_harvest_period"`
MinEmitterHarvestPeriod string `mapstructure:"min_emitter_harvest_period"`
@@ -125,7 +127,7 @@ func RunWithEmitters(cfg *Config, emitters []integration.Emitter) error {
retrievers = append(retrievers, fixedRetriever)

if !cfg.DisableAutodiscovery {
-kubernetesRetriever, err := endpoints.NewKubernetesTargetRetriever(cfg.ScrapeEnabledLabel, cfg.RequireScrapeEnabledLabelForNodes, endpoints.WithInClusterConfig())
+kubernetesRetriever, err := endpoints.NewKubernetesTargetRetriever(cfg.ScrapeEnabledLabel, cfg.RequireScrapeEnabledLabelForNodes, cfg.ScrapeServices, cfg.ScrapeEndpoints, endpoints.WithInClusterConfig())
if err != nil {
logrus.WithError(err).Errorf("not possible to get a Kubernetes client. If you aren't running this integration in a Kubernetes cluster, you can ignore this error")
} else {
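The retriever internals are collapsed in this diff, but the two new flags plus the "endpoints" RBAC rule suggest discovery along these lines. A hypothetical client-go sketch, not the actual internal/pkg/endpoints implementation:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listTargets branches on the two new toggles: scrapeServices lists the
// service objects themselves, scrapeEndpoints lists the Endpoints objects
// behind them.
func listTargets(ctx context.Context, cs kubernetes.Interface, scrapeServices, scrapeEndpoints bool) error {
	if scrapeServices {
		svcs, err := cs.CoreV1().Services(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		fmt.Printf("%d services discovered\n", len(svcs.Items))
	}
	if scrapeEndpoints {
		// This is why the ClusterRole in this PR gains the "endpoints" resource.
		eps, err := cs.CoreV1().Endpoints(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
		if err != nil {
			return err
		}
		fmt.Printf("%d endpoints objects discovered\n", len(eps.Items))
	}
	return nil
}

func main() {
	conf, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(conf)
	if err := listTargets(context.Background(), cs, true, false); err != nil {
		panic(err)
	}
}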
2 changes: 1 addition & 1 deletion internal/pkg/endpoints/endpoints.go
@@ -18,7 +18,7 @@ type TargetRetriever interface {
Name() string
}

-// Object represents a kubernetes object like a pod or a service.
+// Object represents a kubernetes object such as a pod, a service, or an endpoint.
type Object struct {
Name string
Kind string
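For illustration, the Name[Kind] log format used in cmd/k8s-target-retriever/main.go would render an endpoint-backed target as below. The struct here mirrors only the two Object fields visible in this diff, and the example values are guesses:

package main

import "fmt"

// Object mirrors the Name and Kind fields of endpoints.Object; the real
// struct has more fields hidden behind the collapsed context.
type Object struct {
	Name string
	Kind string
}

func main() {
	// Hypothetical endpoint-backed target.
	obj := Object{Name: "kube-state-metrics-abc12", Kind: "endpoint"}
	fmt.Printf("%s[%s]\n", obj.Name, obj.Kind)
}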