From 7aff68847070f8dbc32d56e91f5c62dce2e1bbf3 Mon Sep 17 00:00:00 2001
From: Roberto Santalla
Date: Wed, 30 Dec 2020 14:23:05 +0100
Subject: [PATCH 1/3] config: add commented values for advanced config flags

---
 deploy/nri-prometheus.tmpl.yaml | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/deploy/nri-prometheus.tmpl.yaml b/deploy/nri-prometheus.tmpl.yaml
index 6e3c7893..07e5c17e 100644
--- a/deploy/nri-prometheus.tmpl.yaml
+++ b/deploy/nri-prometheus.tmpl.yaml
@@ -117,6 +117,22 @@ data:
     # Whether k8s nodes need to be labelled to be scraped or not. Defaults to true.
     require_scrape_enabled_label_for_nodes: true
 
+    # Number of worker threads used for scraping targets.
+    # For large clusters with many (>100) endpoints, slowly increase until the scrape
+    # time falls below the desired `scrape_duration`.
+    # Increasing this value too much can result in very high memory consumption if
+    # many metrics are being scraped.
+    # Default: 4
+    # worker_threads: 4
+
+    # Maximum number of metrics to keep in memory until a report is triggered.
+    # Changing this value is not recommended unless instructed by the New Relic support team.
+    # max_stored_metrics: 10000
+
+    # Minimum amount of time to wait between reports. Cannot be lower than the default of 200ms.
+    # Changing this value is not recommended unless instructed by the New Relic support team.
+    # min_emitter_harvest_period: 200ms
+
     # targets:
     #   - description: Secure etcd example
     #     urls: ["https://192.168.3.1:2379", "https://192.168.3.2:2379", "https://192.168.3.3:2379"]

From a46a7a9fccae9b6009d409d5a81a43bda5ad8083 Mon Sep 17 00:00:00 2001
From: Roberto Santalla
Date: Wed, 30 Dec 2020 14:26:09 +0100
Subject: [PATCH 2/3] config: add worker_threads flag to sample config

---
 configs/nri-prometheus-config.yml.sample | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/configs/nri-prometheus-config.yml.sample b/configs/nri-prometheus-config.yml.sample
index 839fffa5..71acac77 100644
--- a/configs/nri-prometheus-config.yml.sample
+++ b/configs/nri-prometheus-config.yml.sample
@@ -32,6 +32,14 @@ integrations:
     # Length in time to distribute the scraping from the endpoints.
     scrape_duration: "5s"
 
+    # Number of worker threads used for scraping targets.
+    # For large clusters with many (>100) endpoints, slowly increase until the scrape
+    # time falls below the desired `scrape_duration`.
+    # Increasing this value too much can result in very high memory consumption if
+    # many metrics are being scraped.
+    # Default: 4
+    # worker_threads: 4
+
     # Whether the integration should skip TLS verification or not. Defaults to false.
     insecure_skip_verify: false

From bc5bbb550be7d0f08332461c3b231d3f1e83e102 Mon Sep 17 00:00:00 2001
From: Roberto Santalla
Date: Wed, 30 Dec 2020 15:26:14 +0100
Subject: [PATCH 3/3] config: s/100/400/

---
 configs/nri-prometheus-config.yml.sample | 2 +-
 deploy/nri-prometheus.tmpl.yaml          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/configs/nri-prometheus-config.yml.sample b/configs/nri-prometheus-config.yml.sample
index 71acac77..9cfe2cc3 100644
--- a/configs/nri-prometheus-config.yml.sample
+++ b/configs/nri-prometheus-config.yml.sample
@@ -33,7 +33,7 @@ integrations:
     scrape_duration: "5s"
 
     # Number of worker threads used for scraping targets.
-    # For large clusters with many (>100) endpoints, slowly increase until the scrape
+    # For large clusters with many (>400) endpoints, slowly increase until the scrape
     # time falls below the desired `scrape_duration`.
     # Increasing this value too much can result in very high memory consumption if
     # many metrics are being scraped.
diff --git a/deploy/nri-prometheus.tmpl.yaml b/deploy/nri-prometheus.tmpl.yaml
index 07e5c17e..429b76ed 100644
--- a/deploy/nri-prometheus.tmpl.yaml
+++ b/deploy/nri-prometheus.tmpl.yaml
@@ -118,7 +118,7 @@ data:
     require_scrape_enabled_label_for_nodes: true
 
     # Number of worker threads used for scraping targets.
-    # For large clusters with many (>100) endpoints, slowly increase until the scrape
+    # For large clusters with many (>400) endpoints, slowly increase until the scrape
     # time falls below the desired `scrape_duration`.
     # Increasing this value too much can result in very high memory consumption if
     # many metrics are being scraped.
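For reference, a minimal sketch of how these advanced flags would look once uncommented, assuming they sit alongside `scrape_duration` in the integration config block as the sample file shows; the `worker_threads` value of 8 is illustrative only, the other values are the defaults stated in the patches above:

    scrape_duration: "5s"
    # Illustrative: raised gradually from the default of 4 for a cluster
    # with >400 endpoints, while watching memory consumption.
    worker_threads: 8
    # Defaults; change only if instructed by the New Relic support team.
    max_stored_metrics: 10000
    min_emitter_harvest_period: 200ms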