From 176fc2fef5f5ed452f2df1043971c2607963f4f7 Mon Sep 17 00:00:00 2001 From: Will Krause Date: Thu, 9 Jun 2022 19:20:47 -0400 Subject: [PATCH 1/6] add json_v2 support --- config/config.go | 87 +- go.mod | 2 +- go.sum | 12 +- install/config.toml | 6182 +++++++++++++++++ plugins/parsers/json_v2/README.md | 268 + plugins/parsers/json_v2/parser.go | 689 ++ plugins/parsers/json_v2/parser_test.go | 113 + .../10670/circonus-unified-agent.conf | 26 + .../json_v2/testdata/10670/expected.out | 1 + .../parsers/json_v2/testdata/10670/input.json | 10 + .../circonus-unified-agent.conf | 10 + .../testdata/array_of_objects/expected.out | 2 + .../testdata/array_of_objects/input.json | 14 + .../circonus-unified-agent.conf | 10 + .../testdata/complex_nesting/expected.out | 3 + .../testdata/complex_nesting/input.json | 31 + .../circonus-unified-agent.conf | 15 + .../testdata/fields_and_tags/expected.out | 2 + .../testdata/fields_and_tags/input.json | 46 + .../circonus-unified-agent.conf | 11 + .../fields_and_tags_complex/expected.out | 5 + .../fields_and_tags_complex/input.json | 87 + .../large_numbers/circonus-unified-agent.conf | 23 + .../testdata/large_numbers/expected.out | 3 + .../json_v2/testdata/large_numbers/input.json | 17 + .../circonus-unified-agent.conf | 10 + .../measurement_name_int/expected.out | 1 + .../testdata/measurement_name_int/input.json | 19 + .../circonus-unified-agent.conf | 16 + .../mix_field_and_object/expected.out | 1 + .../testdata/mix_field_and_object/input.json | 44 + .../circonus-unified-agent.conf | 12 + .../multiple_arrays_in_object/expected.out | 8 + .../multiple_arrays_in_object/input.json | 24 + .../circonus-unified-agent.conf | 19 + .../testdata/multiple_json_input/expected.out | 2 + .../testdata/multiple_json_input/input_1.json | 87 + .../testdata/multiple_json_input/input_2.json | 134 + .../circonus-unified-agent.conf | 11 + .../testdata/multiple_timestamps/expected.out | 2 + .../testdata/multiple_timestamps/input.json | 12 + .../circonus-unified-agent.conf | 17 + .../nested_and_nonnested_tags/expected.out | 12 + .../nested_and_nonnested_tags/input.json | 174 + .../circonus-unified-agent.conf | 16 + .../nested_array_of_objects/expected.out | 2 + .../nested_array_of_objects/input.json | 36 + .../nested_tags/circonus-unified-agent.conf | 13 + .../json_v2/testdata/nested_tags/expected.out | 2 + .../json_v2/testdata/nested_tags/input.json | 16 + .../circonus-unified-agent.conf | 15 + .../testdata/nested_tags_complex/expected.out | 3 + .../testdata/nested_tags_complex/input.json | 35 + .../testdata/null/circonus-unified-agent.conf | 9 + .../json_v2/testdata/null/expected.out | 1 + .../parsers/json_v2/testdata/null/input.json | 40 + .../object/circonus-unified-agent.conf | 13 + .../json_v2/testdata/object/expected.out | 5 + .../json_v2/testdata/object/input.json | 87 + .../circonus-unified-agent.conf | 13 + .../testdata/object_timestamp/expected.out | 3 + .../testdata/object_timestamp/input.json | 19 + .../optional/circonus-unified-agent.conf | 16 + .../json_v2/testdata/optional/expected.out | 0 .../json_v2/testdata/optional/input.json | 3 + .../circonus-unified-agent.conf | 22 + .../testdata/optional_objects/expected.out | 3 + .../testdata/optional_objects/input_1.json | 1 + .../testdata/optional_objects/input_2.json | 1 + .../circonus-unified-agent.conf | 18 + .../subfieldtag_in_object/expected.out | 1 + .../testdata/subfieldtag_in_object/input.json | 97 + .../circonus-unified-agent.conf | 17 + .../subfieldtag_in_object_2/expected.out | 4 + 
.../subfieldtag_in_object_2/input.json | 10 + .../timestamp/circonus-unified-agent.conf | 12 + .../json_v2/testdata/timestamp/expected.out | 4 + .../json_v2/testdata/timestamp/input.json | 25 + .../timestamp_ns/circonus-unified-agent.conf | 12 + .../testdata/timestamp_ns/expected.out | 2 + .../json_v2/testdata/timestamp_ns/input.json | 7 + .../circonus-unified-agent.conf | 9 + .../testdata/timestamp_rfc3339/expected.out | 1 + .../testdata/timestamp_rfc3339/input.json | 4 + .../types/circonus-unified-agent.conf | 109 + .../json_v2/testdata/types/expected.out | 4 + .../parsers/json_v2/testdata/types/input.json | 22 + .../wrong_path/circonus-unified-agent.conf | 50 + .../json_v2/testdata/wrong_path/expected.out | 0 .../json_v2/testdata/wrong_path/input.json | 3 + plugins/parsers/registry.go | 30 + 91 files changed, 9081 insertions(+), 8 deletions(-) create mode 100644 install/config.toml create mode 100644 plugins/parsers/json_v2/README.md create mode 100644 plugins/parsers/json_v2/parser.go create mode 100644 plugins/parsers/json_v2/parser_test.go create mode 100644 plugins/parsers/json_v2/testdata/10670/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/10670/expected.out create mode 100644 plugins/parsers/json_v2/testdata/10670/input.json create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/input.json create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/expected.out create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/input.json create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/expected.out create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/input.json create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/expected.out create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/input.json create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/circonus-unified-agent.conf create mode 100644 
plugins/parsers/json_v2/testdata/multiple_json_input/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/input.json create mode 100644 plugins/parsers/json_v2/testdata/null/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/null/expected.out create mode 100644 plugins/parsers/json_v2/testdata/null/input.json create mode 100644 plugins/parsers/json_v2/testdata/object/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/object/input.json create mode 100644 plugins/parsers/json_v2/testdata/object_timestamp/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/object_timestamp/expected.out create mode 100644 plugins/parsers/json_v2/testdata/object_timestamp/input.json create mode 100644 plugins/parsers/json_v2/testdata/optional/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/optional/expected.out create mode 100644 plugins/parsers/json_v2/testdata/optional/input.json create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/input_1.json create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/input_2.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp/circonus-unified-agent.conf create mode 100644 
plugins/parsers/json_v2/testdata/timestamp/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp_ns/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/timestamp_ns/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp_ns/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp_rfc3339/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json create mode 100644 plugins/parsers/json_v2/testdata/types/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/types/expected.out create mode 100644 plugins/parsers/json_v2/testdata/types/input.json create mode 100644 plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf create mode 100644 plugins/parsers/json_v2/testdata/wrong_path/expected.out create mode 100644 plugins/parsers/json_v2/testdata/wrong_path/input.json diff --git a/config/config.go b/config/config.go index e2d6d0a3..33019890 100644 --- a/config/config.go +++ b/config/config.go @@ -24,6 +24,7 @@ import ( "github.com/circonus-labs/circonus-unified-agent/plugins/inputs" "github.com/circonus-labs/circonus-unified-agent/plugins/outputs" "github.com/circonus-labs/circonus-unified-agent/plugins/parsers" + "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/json_v2" "github.com/circonus-labs/circonus-unified-agent/plugins/processors" "github.com/circonus-labs/circonus-unified-agent/plugins/serializers" "github.com/influxdata/toml" @@ -1416,6 +1417,52 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) + //for JSONPath parser + if node, ok := tbl.Fields["json_v2"]; ok { + if metricConfigs, ok := node.([]*ast.Table); ok { + pc.JSONV2Config = make([]parsers.JSONV2Config, len(metricConfigs)) + for i, metricConfig := range metricConfigs { + mc := pc.JSONV2Config[i] + c.getFieldString(metricConfig, "measurement_name", &mc.MeasurementName) + if mc.MeasurementName == "" { + mc.MeasurementName = name + } + c.getFieldString(metricConfig, "measurement_name_path", &mc.MeasurementNamePath) + c.getFieldString(metricConfig, "timestamp_path", &mc.TimestampPath) + c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat) + c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone) + + mc.Fields = getFieldSubtable(c, metricConfig) + mc.Tags = getTagSubtable(c, metricConfig) + + if objectconfigs, ok := metricConfig.Fields["object"]; ok { + if objectconfigs, ok := objectconfigs.([]*ast.Table); ok { + for _, objectConfig := range objectconfigs { + var o json_v2.JSONObject + c.getFieldString(objectConfig, "path", &o.Path) + c.getFieldBool(objectConfig, "optional", &o.Optional) + c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey) + c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat) + c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone) + c.getFieldBool(objectConfig, "disable_prepend_keys", &o.DisablePrependKeys) + c.getFieldStringSlice(objectConfig, "included_keys", &o.IncludedKeys) + c.getFieldStringSlice(objectConfig, "excluded_keys", &o.ExcludedKeys) + c.getFieldStringSlice(objectConfig, "tags", &o.Tags) + 
c.getFieldStringMap(objectConfig, "renames", &o.Renames) + c.getFieldStringMap(objectConfig, "fields", &o.Fields) + + o.FieldPaths = getFieldSubtable(c, objectConfig) + o.TagPaths = getTagSubtable(c, objectConfig) + + mc.JSONObjects = append(mc.JSONObjects, o) + } + } + } + + pc.JSONV2Config[i] = mc + } + } + } pc.MetricName = name @@ -1426,6 +1473,44 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, return pc, nil } +func getFieldSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var fields []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["field"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var f json_v2.DataSet + c.getFieldString(fieldconfig, "path", &f.Path) + c.getFieldString(fieldconfig, "rename", &f.Rename) + c.getFieldString(fieldconfig, "type", &f.Type) + c.getFieldBool(fieldconfig, "optional", &f.Optional) + fields = append(fields, f) + } + } + } + + return fields +} + +func getTagSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var tags []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var t json_v2.DataSet + c.getFieldString(fieldconfig, "path", &t.Path) + c.getFieldString(fieldconfig, "rename", &t.Rename) + t.Type = "string" + c.getFieldBool(fieldconfig, "optional", &t.Optional) + tags = append(tags, t) + } + } + } + + return tags +} + // buildSerializer grabs the necessary entries from the ast.Table for creating // a serializers.Serializer object, and creates it, which can then be added onto // an Output object. @@ -1515,7 +1600,7 @@ func (c *Config) missingTomlField(typ reflect.Type, key string) error { "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", "json_string_fields", - "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", + "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "json_v2", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", diff --git a/go.mod b/go.mod index ac426718..48d540c7 100644 --- a/go.mod +++ b/go.mod @@ -111,7 +111,7 @@ require ( github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 github.com/stretchr/testify v1.7.1 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/tidwall/gjson v1.6.0 + github.com/tidwall/gjson v1.14.1 github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect github.com/vjeantet/grok v1.0.1 diff --git a/go.sum b/go.sum index 8de0567c..257d4df9 100644 --- a/go.sum +++ b/go.sum @@ -572,12 +572,12 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw=
-github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= diff --git a/install/config.toml b/install/config.toml new file mode 100644 index 00000000..da5882bd --- /dev/null +++ b/install/config.toml @@ -0,0 +1,6182 @@ +# Circonus Unified Agent Configuration +# +# Circonus Unified Agent is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'circonus-unified-agent -config circonus-unified-agent.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # __rollup = "false" + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + device_type = "laptop" + user = "${USER}" + + +# Configuration for circonus-unified-agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## circonus-unified-agent will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that circonus-unified-agent sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. 
Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of circonus-unified-agent instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Override default hostname, if empty use os.Hostname() + ## It is !!important!! to set the hostname when using containers to prevent + ## a unique check being created every time the container starts. + hostname = "" + + ## DEPRECATED: a host tag will NOT be applied to each metric + omit_hostname = false + + [agent.circonus] + ## Circonus API token key must be provided to use the agent + ## REQUIRED + api_token = "bob" + + ## Circonus API application (associated with token) + ## Optional + # api_app = "circonus-unified-agent" + + ## Circonus API URL + ## Optional + # api_url = "https://api.circonus.com/" + api_url = "https://api.circonus.com/" + + ## Circonus API TLS CA file + ## Optional + ## Use for internal deployments with private certificates + # api_tls_ca = "/opt/circonus/unified-agent/etc/circonus_api_ca.pem" + + ## Broker + ## Optional + ## Explicit broker id or blank (default blank, auto select) + ## Broker CID - navigate to broker page in UI. Show API Object use the + ## value of _cid attribute. 
+ # broker = "/broker/35" + + ## Cache check configurations + ## Optional + ## Performance optimization with lots of plugins (or instances of plugins) + # cache_configs = true + ## + ## Cache directory + ## Optional (required if cache_configs is true) + ## Note: cache_dir must be read/write for the user running the cua process + # cache_dir = "/opt/circonus/unified-agent/etc/cache.d" + + ## Debug circonus api calls and trap submissions + ## Optional + # debug_api = true + + ## Trace metric submissions + ## Optional + ## Note: directory to write metrics sent to broker (must be writeable by user running cua process) + ## output json sent to broker (path to write files to or '-' for logger) + # trace_metrics = "/opt/circonus/unified-agent/trace.d" + + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + + +# Configuration for sending metrics to Circonus +[[outputs.circonus]] + ## Check name prefix - unique prefix to use for all checks created by this instance + ## default is the hostname from the OS. If set, "host" tag on metrics will be + ## overridden with this value. For containers, use omit_hostname=true in agent section + ## and set this value, so that the plugin will be able to predictively find the check + ## for this instance. Otherwise, the container's os.Hostname() will be used + ## (resulting in a new check being created every time the container starts). + ## example: + # check_name_prefix = "example" + + ## One check - all metrics go to a single check vs one check per input plugin + ## NOTE: this effectively disables automatic dashboards for supported plugins + # one_check = false + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + +# # Send metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode metric groups. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/cua-health.sock" +# # service_address = "http://:8080" + +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" + +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" + +# ## Allowed CA certificates for client certificates. 
+# # tls_allowed_cacerts = ["/opt/circonus/unified-agent/etc/clientca.pem"] + +# ## TLS server certificate and private key. +# # tls_cert = "/opt/circonus/unified-agent/etc/cert.pem" +# # tls_key = "/opt/circonus/unified-agent/etc/key.pem" + +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" + + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + + +# # Clone metrics and apply modifications. +# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + +# # Convert values to another metric value type +# [[processors.converter]] +# ## Tags to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## <target-type> = [<key>...] +# [processors.converter.tags] +# measurement = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] +# +# ## Fields to convert +# ## +# ## The table key determines the target type, and the array of key-values +# ## select the keys to convert. The array may contain globs. +# ## <target-type> = [<key>...] +# [processors.converter.fields] +# measurement = [] +# tag = [] +# string = [] +# integer = [] +# unsigned = [] +# boolean = [] +# float = [] + + +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## New field to create (cannot set both field_key and tag_key) +# # field_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## If destination is a field, date format can also be one of +# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. +# # date_format = "unix" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag or field using a reference time +# ## string. This can be set to one of "UTC", "Local", or to a location name +# ## in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# # Defaults sets default value(s) for specified fields that are not set on incoming metrics. +# [[processors.defaults]] +# ## Ensures a set of fields always exists on your metric(s) with their +# ## respective default value. +# ## For any given field pair (key = default), if it's not set, a field +# ## is set on the metric with the specified default.
+# ## +# ## A field is considered not set if it is nil on the incoming metric; +# ## or it is not nil but its value is an empty string or is a string +# ## of one or more spaces. +# ## <key> = <default> +# # [processors.defaults.fields] +# # field_1 = "bar" +# # time_idle = 0 +# # is_error = true + + +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map +# field = "status" +# +# ## Name of the tag to map +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset, the unmodified value for the field will be used if no +# ## match is found. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# amber = 2 +# red = 3 + + +# # Run executable as long-running processor plugin +# [[processors.execd]] +# ## Program to run as daemon +# ## eg: command = ["/path/to/your_program", "arg1", "arg2"] +# command = ["cat"] +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" + + +# # Performs file path manipulations on tags and fields +# [[processors.filepath]] +# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag +# # [[processors.filepath.basename]] +# # tag = "path" +# # dest = "basepath" +# +# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory +# # [[processors.filepath.dirname]] +# # field = "path" +# +# ## Treat the tag value as a path, converting it to its last element without its suffix +# # [[processors.filepath.stem]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to the shortest path name equivalent +# ## to path by purely lexical processing +# # [[processors.filepath.clean]] +# # tag = "path" +# +# ## Treat the tag value as a path, converting it to a relative path that is lexically +# ## equivalent to the source path when joined to 'base_path' +# # [[processors.filepath.rel]] +# # tag = "path" +# # base_path = "/var/log" +# +# ## Treat the tag value as a path, replacing each separator character in path with a '/' character. Only has an +# ## effect on Windows +# # [[processors.filepath.toslash]] +# # tag = "path" + + +# # Add a tag of the network interface name looked up over SNMP by interface number +# [[processors.ifname]] +# ## Name of tag holding the interface number +# # tag = "ifIndex" +# +# ## Name of output tag where service name will be added +# # dest = "ifName" +# +# ## Name of tag of the SNMP agent to request the interface name from +# # agent = "agent" +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name.
+# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## max_parallel_lookups is the maximum number of SNMP requests to +# ## make at the same time. +# # max_parallel_lookups = 100 +# +# ## ordered controls whether or not the metrics need to stay in the +# ## same order this plugin received them in. If false, this plugin +# ## may change the order when data is cached. If you need metrics to +# ## stay in order set this to true. keeping the metrics ordered may +# ## be slightly slower +# # ordered = false +# +# ## cache_ttl is the amount of time interface names are cached for a +# ## given agent. After this period elapses if names are needed they +# ## will be retrieved again. +# # cache_ttl = "8h" + + +# # Apply metric modifications using override semantics. +# [[processors.override]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.override.tags] +# # additional_tag = "tag_value" + + +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. +# value_key = "value" + + +# # Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file +# [[processors.port_name]] +# [[processors.port_name]] +# ## Name of tag holding the port number +# # tag = "port" +# +# ## Name of output tag where service name will be added +# # dest = "service" +# +# ## Default tcp or udp +# # default_protocol = "tcp" + + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + +# # Transforms tag and field values with regex pattern +# [[processors.regex]] +# ## Tag and field conversions defined in a separate sub-tables +# # [[processors.regex.tags]] +# # ## Tag to change +# # key = "resp_code" +# # ## Regular expression to match on a tag value +# # pattern = "^(\\d)\\d\\d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. 
+# # replacement = "${1}xx" +# +# # [[processors.regex.fields]] +# # ## Field to change +# # key = "request" +# # ## All the power of the Go regular expressions available here +# # ## For example, named subgroups +# # pattern = "^/api(?P/[\\w/]+)\\S*" +# # replacement = "${method}" +# # ## If result_key is present, a new field will be created +# # ## instead of changing existing field +# # result_key = "method" +# +# ## Multiple conversions may be applied for one field sequentially +# ## Let's extract one more value +# # [[processors.regex.fields]] +# # key = "request" +# # pattern = ".*category=(\\w+).*" +# # replacement = "${1}" +# # result_key = "search_category" + + +# # Rename measurements, tags, and fields that pass through this filter. +# [[processors.rename]] + + +# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name +# [[processors.reverse_dns]] +# ## For optimal performance, you may want to limit which metrics are passed to this +# ## processor. eg: +# ## namepass = ["my_metric_*"] +# +# ## cache_ttl is how long the dns entries should stay cached for. +# ## generally longer is better, but if you expect a large number of diverse lookups +# ## you'll want to consider memory use. +# cache_ttl = "24h" +# +# ## lookup_timeout is how long should you wait for a single dns request to repsond. +# ## this is also the maximum acceptable latency for a metric travelling through +# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will +# ## be passed on unaltered. +# ## multiple simultaneous resolution requests for the same IP will only make a +# ## single rDNS request, and they will all wait for the answer for this long. +# lookup_timeout = "3s" +# +# ## max_parallel_lookups is the maximum number of dns requests to be in flight +# ## at the same time. Requesting hitting cached values do not count against this +# ## total, and neither do mulptiple requests for the same IP. +# ## It's probably best to keep this number fairly low. +# max_parallel_lookups = 10 +# +# ## ordered controls whether or not the metrics need to stay in the same order +# ## this plugin received them in. If false, this plugin will change the order +# ## with requests hitting cached results moving through immediately and not +# ## waiting on slower lookups. This may cause issues for you if you are +# ## depending on the order of metrics staying the same. If so, set this to true. +# ## keeping the metrics ordered may be slightly slower. +# ordered = false +# +# [[processors.reverse_dns.lookup]] +# ## get the ip from the field "source_ip", and put the result in the field "source_name" +# field = "source_ip" +# dest = "source_name" +# +# [[processors.reverse_dns.lookup]] +# ## get the ip from the tag "destination_ip", and put the result in the tag +# ## "destination_name". +# tag = "destination_ip" +# dest = "destination_name" +# +# ## If you would prefer destination_name to be a field instead, you can use a +# ## processors.converter after this one, specifying the order attribute. + + +# # Add the S2 Cell ID as a tag based on latitude and longitude fields +# [[processors.s2geo]] +# ## The name of the lat and lon fields containing WGS-84 latitude and +# ## longitude in decimal degrees. 
+# # lat_field = "lat" +# # lon_field = "lon" +# +# ## New tag to create +# # tag_key = "s2_cell_id" +# +# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) +# # cell_level = 9 + + +# # Process metrics using a Starlark script +# [[processors.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# def apply(metric): +# return metric +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" +# +# ## Trims strings based on width +# # [[processors.strings.left]] +# # field = "message" +# # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" + + +# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. +# [[processors.tag_limit]] +# ## Maximum number of tags to preserve +# limit = 10 +# +# ## List of tags to preferentially preserve +# keep = ["foo", "bar", "baz"] + + +# # Uses a Go template to create a new tag +# [[processors.template]] +# ## Tag to set with the output of the template. +# tag = "topic" +# +# ## Go template used to create the tag value. In order to ease TOML +# ## escaping requirements, you may wish to use single quotes around the +# ## template string. +# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' + + +# # Print all metrics that pass through this filter. +# [[processors.topk]] +# ## How many seconds between aggregations +# # period = 10 +# +# ## How many top metrics to return +# # k = 10 +# +# ## Over which tags should the aggregation be done. Globs can be specified, in +# ## which case any tag matching the glob will aggregated over. If set to an +# ## empty list is no aggregation over tags is done +# # group_by = ['*'] +# +# ## Over which fields are the top k are calculated +# # fields = ["value"] +# +# ## What aggregation to use. 
Options: sum, mean, min, max +# # aggregation = "mean" +# +# ## Instead of the top k largest metrics, return the bottom k lowest metrics +# # bottomk = false +# +# ## The plugin assigns each metric a GroupBy tag generated from its name and +# ## tags. If this setting is different than "" the plugin will add a +# ## tag (whose name will be the value of this setting) to each metric with +# ## the value of the calculated GroupBy tag. Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows you to specify for which +# ## fields the position is required. If the list is non-empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_aggregate_field' setting allows you to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non-empty, then a field will be added to each and every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Tag to use for the name. +# tag_key = "name" +# ## Field to use for the name of the value. +# value_key = "value" + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + + +# # Report the final metric of a series +# [[aggregators.final]] +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## The time that a series is not updated until it is considered final. +# series_timeout = "5m" + + +# # Create aggregate histograms. +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true.
+# cumulative = true +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## Right borders of buckets (with +Inf implicitly added). +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + +############################################################################### +# INPUT PLUGINS # +# # +# !!IMPORTANT!! -- all input plugins require an instance_id attribute # +# # +############################################################################### + +## default plugins + +# # Read metrics about cpu usage +# [[inputs.cpu]] +# instance_id = "host" +# ## Whether to report per-cpu stats or not +# percpu = true +# ## Whether to report total system cpu stats or not +# totalcpu = true +# ## If true, collect raw CPU time metrics. +# collect_cpu_time = false +# ## If true, compute and report the sum of all non-idle CPU states. +# report_active = false + + +# # Read metrics about disk usage by mount point +# [[inputs.disk]] +# instance_id = "host" +# ## By default stats will be gathered for all mount points. +# ## Set mount_points will restrict the stats to only the specified mount points. +# # mount_points = ["/"] + +# ## Ignore mount points by filesystem type. +# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + +# # Read metrics about disk IO by device +# [[inputs.diskio]] +# instance_id = "host" +# ## By default, agent will gather stats for all devices including +# ## disk partitions. +# ## Setting devices will restrict the stats to the specified devices. +# # devices = ["sda", "sdb", "vd*"] +# ## Uncomment the following line if you need disk serial numbers. +# # skip_serial_number = false +# # +# ## On systems which support it, device metadata can be added in the form of +# ## tags. +# ## Currently only Linux is supported via udev properties. You can view +# ## available properties for a device by running: +# ## 'udevadm info -q property -n /dev/sda' +# ## Note: Most, but not all, udev properties can be accessed this way. 
Properties +# ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. +# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] +# # +# ## Using the same metadata source as device_tags, you can also customize the +# ## name of the device via templates. +# ## The 'name_templates' parameter is a list of templates to try and apply to +# ## the device. The template may contain variables in the form of '$PROPERTY' or +# ## '${PROPERTY}'. The first template which does not contain any variables not +# ## present for the device is used as the device name tag. +# ## The typical use case is for LVM volumes, to get the VG/LV name instead of +# ## the near-meaningless DM-0 name. +# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# # Get kernel statistics from /proc/stat +# [[inputs.kernel]] +# instance_id = "host" +# # no other configuration + + +# # Read metrics about memory usage +# [[inputs.mem]] +# instance_id = "host" +# # no other configuration + +# # Read metrics about network interface usage +# [[inputs.net]] +# instance_id = "host" +# ## By default, agent gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] +# ## +# ## On linux systems agent also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. +# ## +# # ignore_protocol_stats = false +# ## + +# # Get the number of processes and group them by status +# [[inputs.processes]] +# instance_id = "host" +# # no other configuration + + +# # Read metrics about swap memory usage +# [[inputs.swap]] +# instance_id = "host" +# # no other configuration + + +# # Read metrics about system load & uptime +# [[inputs.system]] +# instance_id = "host" +# ## Uncomment to remove deprecated metrics. +# # fielddrop = ["uptime_format"] + +## end default plugins +## agent plugins + +# # Collect statistics about itself +# [[inputs.internal]] +# instance_id = "host" +# ## If true, collect agent memory stats. +# # collect_memstats = true + +## end agent plugins + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# instance_id = "" # REQUIRED +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "127.0.0.1" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# instance_id = "" # REQUIRED +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. 
+# servers = ["localhost:3000"] +# +# # username = "circonus" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # enable_tls = false +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# # Feature Options +# # Add namespace variable to limit the namespaces executed on +# # Leave blank to do all +# # disable_query_namespaces = true # default false +# # namespaces = ["namespace1", "namespace2"] +# +# # Enable set level telmetry +# # query_sets = true # default: false +# # Add namespace set combinations to limit sets executed on +# # Leave blank to do all sets +# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] +# +# # Histograms +# # enable_ttl_histogram = true # default: false +# # enable_object_size_linear_histogram = true # default: false +# +# # by default, aerospike produces a 100 bucket histogram +# # this is not great for most graphing tools, this will allow +# # the ability to squash this to a smaller number of buckets +# # num_histogram_buckets = 100 # default: 10 + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# instance_id = "" # REQUIRED +# ## An array of URLs to gather from, must be directed at the machine +# ## readable version of the mod_status page including the auto query string. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. +# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# instance_id = "" # REQUIRED +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# instance_id = "" # REQUIRED +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. 
+# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# instance_id = "" # REQUIRED +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Read metrics of bcache from stats_total and dirty_data +# [[inputs.bcache]] +# instance_id = "" # REQUIRED +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, agent gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# instance_id = "" # REQUIRED +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# instance_id = "" # REQUIRED +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# instance_id = "" # REQUIRED +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## By default, agent will gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# instance_id = "" # REQUIRED +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. 
+# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/circonus-unified-agent/ca.pem" +# # ssl_cert = "/etc/circonus-unified-agent/cert.pem" +# # ssl_key = "/etc/circonus-unified-agent/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# instance_id = "" # REQUIRED +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# instance_id = "" # REQUIRED +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# instance_id = "" # REQUIRED +# ## If true, chronyc tries to perform a DNS lookup for the time server. +# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# instance_id = "" # REQUIRED +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. 
+# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Cloudwatch API +# # and will not be collected by the agent. +# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. +# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. +# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespace (required) +# namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. +# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] +# # statistic_exclude = [] +# +# ## Metrics to Pull +# ## Defaults to all Metrics in Namespace if nothing is provided +# ## Refreshes Namespace available metrics every 1h +# #[[inputs.cloudwatch.metrics]] +# # names = ["Latency", "RequestCount"] +# # +# # ## Statistic filters for Metric. These allow for retrieving specific +# # ## statistics for an individual metric. +# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] +# # # statistic_exclude = [] +# # +# # ## Dimension filters for Metric. All dimensions defined for the metric names +# # ## must be specified in order to retrieve the metric statistics. +# # [[inputs.cloudwatch.metrics.dimensions]] +# # name = "LoadBalancerName" +# # value = "p-example" + + +# # Collects conntrack stats from the configured directories and files. +# [[inputs.conntrack]] +# instance_id = "" # REQUIRED +# ## The following defaults would work with multiple versions of conntrack. +# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across +# ## kernel versions, as are the directory locations. +# +# ## Superset of filenames to look for within the conntrack dirs. +# ## Missing files will be ignored. +# files = ["ip_conntrack_count","ip_conntrack_max", +# "nf_conntrack_count","nf_conntrack_max"] +# +# ## Directories to search within for the conntrack files above. +# ## Missing directories will be ignored.
+# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] + + +# # Gather health check statuses from services registered in Consul +# [[inputs.consul]] +# instance_id = "" # REQUIRED +# ## Consul server address +# # address = "localhost:8500" +# +# ## URI scheme for the Consul server, one of "http", "https" +# # scheme = "http" +# +# ## Metric version controls the mapping from Consul metrics into +# ## internal metrics. +# ## +# ## example: metric_version = 1; deprecated in 1.15 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## ACL token used in every request +# # token = "" +# +# ## HTTP Basic Authentication username and password. +# # username = "" +# # password = "" +# +# ## Data center to query the health checks from +# # datacenter = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## Consul checks' tag splitting +# # When tags are formatted like "key:value" with ":" as a delimiter then +# # they will be splitted and reported as proper key:value pairs +# # tag_delimiter = ":" + + +# # Read metrics from one or many couchbase clusters +# [[inputs.couchbase]] +# instance_id = "" # REQUIRED +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# instance_id = "" # REQUIRED +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "circonus" +# # basic_password = "p@ssw0rd" + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# instance_id = "" # REQUIRED +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-ee-master-1" +# +# ## The ID of the service account. +# service_account_id = "circonus" +# ## The private key file for the service account. +# service_account_private_key = "/etc/circonus-unified-agent/circonus-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. 
+# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# instance_id = "" # REQUIRED +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# instance_id = "" # REQUIRED +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# instance_id = "" # REQUIRED +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# instance_id = "" # REQUIRED +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false +## +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = true +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...) and +# ## network (eth0, eth1, ...) stats or not +# perdevice = true +# +# ## Whether to report for each container total blkio and network stats or not +# total = false +# +# ## Which environment variables should we use as a tag +# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] +# +# ## docker labels to include and exclude as tags. Globs accepted. 
+# ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# instance_id = "" # REQUIRED +# ## specify dovecot servers via an address:port list +# ## e.g. +# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints. +# [[inputs.ecs]] +# instance_id = "" # REQUIRED +# ## ECS metadata url. +# ## Metadata v2 API is used if set explicitly. Otherwise, +# ## v3 metadata endpoint API is used if available. +# # endpoint_url = "" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# ecs_label_include = [ "com.amazonaws.ecs.*" ] +# ecs_label_exclude = [] +# +# ## Timeout for queries. +# # timeout = "5s" + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# instance_id = "" # REQUIRED +# ## specify a list of one or more Elasticsearch servers +# # you can add username and password to your url to use basic authentication: +# # servers = ["http://user:pass@localhost:9200"] +# servers = ["http://localhost:9200"] +# +# ## Timeout for HTTP requests to the Elasticsearch server(s) +# http_timeout = "5s" +# +# ## When local is true (the default), the node will read only its own stats. +# ## Set local to false when you want to read the node stats from all nodes +# ## of the cluster. +# local = true +# +# ## Set cluster_health to true when you want to also obtain cluster health stats +# cluster_health = false +# +# ## Adjust cluster_health_level when you want to also obtain detailed health stats +# ## The options are +# ## - indices (default) +# ## - cluster +# # cluster_health_level = "indices" +# +# ## Set cluster_stats to true when you want to also obtain cluster stats. +# cluster_stats = false +# +# ## Only gather cluster_stats from the master node. This requires local = true +# cluster_stats_only_from_master = true +# +# ## Indices to collect; can be one or more index names or _all +# indices_include = ["_all"] +# +# ## One of "shards", "cluster", "indices" +# indices_level = "shards" +# +# ## node_stats is a list of sub-stats that you want to have gathered. Valid options +# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", +# ## "breaker".
By default, all stats are gathered. +# # node_stats = ["jvm", "http"] +# +# ## HTTP Basic Authentication username and password. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# instance_id = "" # REQUIRED +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# instance_id = "" # REQUIRED +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# instance_id = "" # REQUIRED +# ## Use sudo to run fail2ban-client +# use_sudo = false + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# instance_id = "" # REQUIRED +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Parse a complete file each interval +# [[inputs.file]] +# instance_id = "" # REQUIRED +# ## Files to parse each interval. Accept standard unix glob matching rules, +# ## as well as ** to match recursive files and directories. +# files = ["/tmp/metrics.out"] +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. +# # file_tag = "" +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## The data format to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Count files in a directory +# [[inputs.filecount]] +# instance_id = "" # REQUIRED +# ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option +# # directory = "/var/cache/apt/archives" +# +# ## Directories to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk".
ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# +# ## Only count files that are at least this size. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# instance_id = "" # REQUIRED +# ## Files to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just collect stats for the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/log/**.log"] +# +# ## If true, read the entire file and calculate a sha256 checksum. +# sha256 = false + + +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# instance_id = "" # REQUIRED +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = "https://fireboard.io/api/v1/devices.json" +# ## You can set a different http_timeout if you need to +# ## You should set a string using a number and a time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + +# # Read metrics exposed by fluentd in_monitor plugin +# [[inputs.fluentd]] +# instance_id = "" # REQUIRED +# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). +# ## +# ## Endpoint: +# ## - only one URI is allowed +# ## - https is not supported +# endpoint = "http://localhost:24220/api/plugins.json" +# +# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) +# exclude = [ +# "monitor_agent", +# "dummy", +# ] + + +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# instance_id = "" # REQUIRED +# ## List of repositories to monitor. +# repositories = [ +# "circonus-labs/circonus-unified-agent", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests.
+# # http_timeout = "5s" + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# instance_id = "" # REQUIRED +# ## API endpoint, currently supported API: +# ## +# ## - multiple (Ex http://:12900/system/metrics/multiple) +# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. +# ## +# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# ] +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. +# ## Or by hitting the the web service api at: +# ## http://[graylog-host]:12900/system/metrics +# metrics = [ +# "jvm.cl.loaded", +# "jvm.memory.pools.Metaspace.committed" +# ] +# +# ## Username and password +# username = "" +# password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of haproxy, via socket or csv stats page +# [[inputs.haproxy]] +# instance_id = "" # REQUIRED +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.10.3.33:1936, etc. +# ## Make sure you specify the complete path to the stats endpoint +# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats +# +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## You can also use local socket with standard wildcard globbing. +# ## Server address not starting with 'http' will be treated as a possible +# ## socket, so both examples below are valid. +# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] +# +# ## By default, some of the fields are renamed from what haproxy calls them. +# ## Setting this option to true results in the plugin keeping the original +# ## field names. +# # keep_field_names = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor disks' temperatures using hddtemp +# [[inputs.hddtemp]] +# instance_id = "" # REQUIRED +# ## By default, agent gathers temps data from all disks detected by the +# ## hddtemp. +# ## +# ## Only collect temps from the selected disks. +# ## +# ## A * as the device name will return the temperature values of all disks. 
+# ## +# # address = "127.0.0.1:7634" +# # devices = ["sda", "*"] + + +# # Read formatted metrics from one or more HTTP endpoints +# [[inputs.http]] +# instance_id = "" # REQUIRED +# ## One or more URLs from which to read formatted metrics +# urls = [ +# "http://localhost/metrics" +# ] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## List of success status codes +# # success_status_codes = [200] +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + +# # HTTP/HTTPS request given an address, a method, and a timeout +# [[inputs.http_response]] +# instance_id = "" # REQUIRED +# ## Deprecated in 1.12, use 'urls' +# ## Server address (default http://localhost) +# # address = "http://localhost" +# +# ## List of urls to query. +# # urls = ["http://localhost"] +# +# ## Set http_proxy (agent uses the system wide proxy settings if it is not set) +# # http_proxy = "http://localhost:8888" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## HTTP Request Method +# # method = "GET" +# +# ## Whether to follow redirects from the server (defaults to false) +# # follow_redirects = false +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional HTTP Request Body +# # body = ''' +# # {'fake':'data'} +# # ''' +# +# ## Optional name of the field that will contain the body of the response. +# ## By default it is set to an empty string indicating that the body's content won't be added +# # response_body_field = '' +# +# ## Maximum allowed HTTP response body size in bytes. +# ## 0 means to use the default of 32MiB.
+# ## If the response body size exceeds this limit a "body_read_error" will be raised +# # response_body_max_size = "32MiB" +# +# ## Optional substring or regex match in body of the response (case sensitive) +# # response_string_match = "\"service_status\": \"up\"" +# # response_string_match = "ok" +# # response_string_match = "\".*_status\".?:.?\"up\"" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# instance_id = "" # REQUIRED +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. httpjson_webserver_stats +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## will be included in the query. For "POST" requests, data will be included +# ## in the request body as "x-www-form-urlencoded". +# # [inputs.httpjson.parameters] +# # event_type = "cpu_spike" +# # threshold = "0.75" +# +# ## HTTP Headers (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" + + +# # Gather Icinga2 status +# [[inputs.icinga2]] +# instance_id = "" # REQUIRED +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Required Icinga2 object type ("services" or "hosts") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# [[inputs.infiniband]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# instance_id = "" # REQUIRED +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. +# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## http request & header timeout +# timeout = "5s" + +# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +# [[inputs.interrupts]] +# instance_id = "" # REQUIRED +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# +# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. +# # [inputs.interrupts.tagdrop] +# # irq = [ "NET_RX", "TASKLET" ] + + +# # Read metrics from the bare metal servers via IPMI +# [[inputs.ipmi_sensor]] +# instance_id = "" # REQUIRED +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the cua user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## +# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR +# # privilege = "ADMINISTRATOR" +# ## +# ## optionally specify one or more servers via a url matching +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# +# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid +# ## gaps or overlap in pulled data +# interval = "30s" +# +# ## Timeout for the ipmitool command to complete +# timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 + + +# # Gather packets and bytes counters from Linux ipsets +# [[inputs.ipset]] +# instance_id = "" # REQUIRED +# ## By default, we only show sets which have already matched at least 1 packet. +# ## set include_unmatched_sets = true to gather them all. 
+# include_unmatched_sets = false +# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") +# use_sudo = false +# ## The default timeout of 1s for ipset execution can be overridden here: +# # timeout = "1s" + + +# # Gather packets and bytes throughput from iptables +# [[inputs.iptables]] +# instance_id = "" # REQUIRED +# ## iptables requires root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run iptables. +# ## Users must configure sudo to allow cua user to run iptables with no password. +# ## iptables can be restricted to only list command "iptables -nvL". +# use_sudo = false +# ## Setting 'use_lock' to true runs iptables with the "-w" option. +# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL") +# use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" +# ## defines the table to monitor: +# table = "filter" +# ## defines the chains to monitor. +# ## NOTE: iptables rules without a comment will not be monitored. +# ## Read the plugin documentation for more information. +# chains = [ "INPUT" ] + + +# # Collect virtual and real server stats from Linux IPVS +# [[inputs.ipvs]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# instance_id = "" # REQUIRED +# ## The Jenkins URL in the format "schema://host:port" +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layers of sub jobs +# ## This config will limit the layers of pulling, default value 0 means +# ## unlimited pulling until no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config will limit the calls to only the latest branches in each layer, +# ## empty will use the default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to exclude from gathering +# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] +# +# ## Nodes to exclude from gathering +# # node_exclude = [ "node1", "node2" ] +# +# ## Worker pool for jenkins plugin only +# ## Leaving this field empty will use the default value 5 +# # max_connections = 5 + + +# # Read JMX metrics from a Jolokia REST agent endpoint +# [[inputs.jolokia2_agent]] +# instance_id = "" # REQUIRED +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "."
+# +# # Add agents URLs to query +# urls = ["http://localhost:8080/jolokia"] +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add metrics to read +# [[inputs.jolokia2_agent.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read JMX metrics from a Jolokia REST proxy endpoint +# [[inputs.jolokia2_proxy]] +# instance_id = "" # REQUIRED +# # default_tag_prefix = "" +# # default_field_prefix = "" +# # default_field_separator = "." +# +# ## Proxy agent +# url = "http://localhost:8080/jolokia" +# # username = "" +# # password = "" +# # response_timeout = "5s" +# +# ## Optional TLS config +# # tls_ca = "/var/private/ca.pem" +# # tls_cert = "/var/private/client.pem" +# # tls_key = "/var/private/client-key.pem" +# # insecure_skip_verify = false +# +# ## Add proxy targets to query +# # default_target_username = "" +# # default_target_password = "" +# [[inputs.jolokia2_proxy.target]] +# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" +# # username = "" +# # password = "" +# +# ## Add metrics to read +# [[inputs.jolokia2_proxy.metric]] +# name = "java_runtime" +# mbean = "java.lang:type=Runtime" +# paths = ["Uptime"] + + +# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.kapacitor]] +# instance_id = "" # REQUIRED +# ## Multiple URLs from which to read Kapacitor-formatted JSON +# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". +# urls = [ +# "http://localhost:9092/kapacitor/v1/debug/vars" +# ] +# +# ## Time limit for http requests +# timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Get kernel statistics from /proc/vmstat +# [[inputs.kernel_vmstat]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# instance_id = "" # REQUIRED +# ## Specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# instance_id = "" # REQUIRED +# ## URL for the Kubernetes API +# url = "https://127.0.0.1" +# +# ## Namespace to use. Set to "" to use all namespaces. +# # namespace = "default" +# +# ## Use bearer token for authorization. 
('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional Resources to exclude from gathering +# ## Leave them blank to try to gather everything available. +# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes", +# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" +# # resource_exclude = [ "deployments", "nodes", "statefulsets" ] +# +# ## Optional Resources to include when gathering +# ## Overrides resource_exclude if both set. +# # resource_include = [ "deployments", "nodes", "statefulsets" ] +# +# ## selectors to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all selectors as tags +# ## selector_exclude overrides selector_include if both set. +# # selector_include = [] +# # selector_exclude = ["*"] +# +# ## Optional TLS Config +# # tls_ca = "/path/to/cafile" +# # tls_cert = "/path/to/certfile" +# # tls_key = "/path/to/keyfile" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the kubernetes kubelet api +# [[inputs.kubernetes]] +# instance_id = "" # REQUIRED +# ## URL for the kubelet +# url = "http://127.0.0.1:10255" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Pod labels to be added as tags. An empty array for both include and +# ## exclude will include all labels. +# # label_include = [] +# # label_exclude = ["*"] +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/path/to/cafile" +# # tls_cert = "/path/to/certfile" +# # tls_key = "/path/to/keyfile" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# instance_id = "" # REQUIRED +# ## An array of URLs of the form: +# ## host [ ":" port] +# servers = ["127.0.0.1:4020"] + + +# # Provides Linux sysctl fs metrics +# [[inputs.linux_sysctl_fs]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# instance_id = "" # REQUIRED +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers.
+# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + +# # Read metrics from local Lustre service on OST, MDS +# [[inputs.lustre2]] +# instance_id = "" # REQUIRED +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats", +# # "/proc/fs/lustre/obdfilter/*/job_stats", +# # ] +# # mds_procfiles = [ +# # "/proc/fs/lustre/mdt/*/md_stats", +# # "/proc/fs/lustre/mdt/*/job_stats", +# # ] + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# instance_id = "" # REQUIRED +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. +# days_old = 0 +# ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old +# # campaign_id = "" + + +# # Retrieves information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# instance_id = "" # REQUIRED +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers +# [[inputs.mcrouter]] +# instance_id = "" # REQUIRED +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# instance_id = "" # REQUIRED +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# instance_id = "" # REQUIRED +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled.
+# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# instance_id = "" # REQUIRED +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# instance_id = "" # REQUIRED +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. +# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# # transmission_mode = "RTU" +# +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", 
data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, +# ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# instance_id = "" # REQUIRED +# ## An array of URLs of the form: +# ## "mongodb://" [user ":" pass "@"] host [ ":" port] +# ## For example: +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# servers = ["mongodb://127.0.0.1:27017"] +# +# ## When true, collect cluster status +# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which +# ## may have an impact on performance. +# # gather_cluster_status = true +# +# ## When true, collect per database stats +# # gather_perdb_stats = false +# +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics and status information about processes managed by Monit +# [[inputs.monit]] +# instance_id = "" # REQUIRED +# ## Monit HTTPD address +# address = "http://127.0.0.1:2812" +# +# ## Username and Password for Monit +# # username = "" +# # password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# instance_id = "" # REQUIRED +# ## Base directory where agent will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, agent discards all data when a single file can't be read. +# ## Else, agent omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# instance_id = "" # REQUIRED +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. +# servers = ["tcp(127.0.0.1:3306)/"] +# +# ## Selects the metric output format. +# ## +# ## This option exists to maintain backwards compatibility, if you have +# ## existing metrics do not set or change this value until you are ready to +# ## migrate to the new format. 
+# ## +# ## If you do not have existing metrics from this plugin set to the latest +# ## version. +# ## +# metric_version = 2 +# +# ## if the list is empty, then metrics are gathered from all database tables +# # table_schema_databases = [] +# +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# # gather_table_schema = false +# +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# # gather_process_list = false +# +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# # gather_user_statistics = false +# +# ## gather auto_increment columns and max values from information schema +# # gather_info_schema_auto_inc = false +# +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# # gather_innodb_metrics = false +# +# ## gather metrics from SHOW SLAVE STATUS command output +# # gather_slave_status = false +# +# ## gather metrics from SHOW BINARY LOGS command output +# # gather_binary_logs = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# # gather_global_variables = true +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# # gather_table_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# # gather_table_lock_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# # gather_index_io_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# # gather_event_waits = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# # gather_file_events_stats = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# # gather_perf_events_statements = false +# +# ## the limits for metrics form perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# ## example: interval_slow = "30m" +# # interval_slow = "" +# +# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Provides metrics about the state of a NATS server +# [[inputs.nats]] +# instance_id = "" # REQUIRED +# ## The address of the monitoring endpoint of the NATS server +# server = "http://localhost:8222" +# +# ## Maximum time to receive response +# # response_timeout = "5s" + + +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# instance_id = "" # REQUIRED +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" + + +# # Collect response time of a TCP or UDP connection +# [[inputs.net_response]] +# instance_id = "" # REQUIRED +# ## Protocol, must be "tcp" or "udp" +# ## NOTE: because the "udp" protocol does not respond to requests, it requires +# ## a send/expect string pair (see below). 
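+# ## Illustrative sketch only (hypothetical values, not plugin defaults): a UDP
+# ## check would combine the three options, e.g. protocol = "udp", send = "ping",
+# ## expect = "pong", against a service known to echo that reply.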
+# protocol = "tcp" +# ## Server address (default localhost) +# address = "localhost:80" +# +# ## Set timeout +# # timeout = "1s" +# +# ## Set read timeout (only used if expecting a response) +# # read_timeout = "1s" +# +# ## The following options are required for UDP checks. For TCP, they are +# ## optional. The plugin will send the given string to the server and then +# ## expect to receive the given 'expect' string back. +# ## string sent to the server +# # send = "ssh" +# ## expected string in answer +# # expect = "ssh" +# +# ## Uncomment to remove deprecated fields +# # fielddrop = ["result_type", "string_found"] + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# instance_id = "" # REQUIRED +# # An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# tls_ca = "/etc/circonus-unified-agent/ca.pem" +# tls_cert = "/etc/circonus-unified-agent/cert.cer" +# tls_key = "/etc/circonus-unified-agent/key.key" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' full status information (ngx_http_status_module) +# [[inputs.nginx_plus]] +# instance_id = "" # REQUIRED +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus Api documentation +# [[inputs.nginx_plus_api]] +# instance_id = "" # REQUIRED +# ## An array of API URI to gather stats. +# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# instance_id = "" # REQUIRED +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# instance_id = "" # REQUIRED +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# instance_id = "" # REQUIRED +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # A plugin to collect stats from the NSD authoritative DNS name server +# [[inputs.nsd]] +# instance_id = "" # REQUIRED +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# instance_id = "" # REQUIRED +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# instance_id = "" # REQUIRED +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. 
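+# ## (Assumed variable names, shown for illustration only -- confirm against the
+# ##  plugin README: PROC_NET_NETSTAT, PROC_NET_SNMP and PROC_NET_SNMP6.)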
+# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# instance_id = "" # REQUIRED +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# instance_id = "" # REQUIRED +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# instance_id = "" # REQUIRED +# ## Device name +# # name = "localhost" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the estabilished connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". +# ## If cert path is not supplied, self-signed cert and key will be generated. +# # certificate = "/etc/circonus-unified-agent/cert.pem" +# # +# ## Path to private key.pem. Required when security mode or policy isn't "None". +# ## If key path is not supplied, self-signed cert and key will be generated. +# # private_key = "/etc/circonus-unified-agent/key.pem" +# # +# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To +# ## authenticate using a specific ID, select 'Certificate' or 'UserName' +# # auth_method = "Anonymous" +# # +# ## Username. Required for auth_method = "UserName" +# # username = "" +# # +# ## Password. Required for auth_method = "UserName" +# # password = "" +# # +# ## Node ID configuration +# ## name - the variable name +# ## namespace - integer value 0 thru 3 +# ## identifier_type - s=string, i=numeric, g=guid, b=opaque +# ## identifier - tag as shown in opcua browser +# ## data_type - boolean, byte, short, int, uint, uint16, int16, +# ## uint32, int32, float, double, string, datetime, number +# ## Example: +# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} +# nodes = [ +# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, +# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, +# ] + + +# # OpenLDAP cn=Monitor plugin +# [[inputs.openldap]] +# instance_id = "" # REQUIRED +# host = "localhost" +# port = 389 +# +# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. +# # note that port will likely need to be changed to 636 for ldaps +# # valid options: "" | "starttls" | "ldaps" +# tls = "" +# +# # skip peer certificate verification. Default is false. +# insecure_skip_verify = false +# +# # Path to PEM-encoded Root certificate to use to verify server certificate +# tls_ca = "/etc/ssl/certs.pem" +# +# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. 
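+# # Illustrative example of a non-anonymous bind (hypothetical DN and password,
+# # not defaults):
+# #   bind_dn = "cn=monitor,dc=example,dc=org"
+# #   bind_password = "s3cr3t"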
+# bind_dn = "" +# bind_password = "" +# +# # Reverse metric names so they sort more naturally. Recommended. +# # This defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true + + +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# instance_id = "" # REQUIRED +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# [[inputs.opensmtpd]] +# instance_id = "" # REQUIRED +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" +# +# ## The default timeout of 1000ms can be overridden with (in milliseconds): +# timeout = 1000 + + +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# instance_id = "" # REQUIRED +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City ID's to collect weather data from. +# city_id = ["5391959"] +# +# ## Language of the description field. Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# instance_id = "" # REQUIRED +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Gather counters from PF +# [[inputs.pf]] +# instance_id = "" # REQUIRED +# ## PF require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run pfctl. +# ## Users must configure sudo to allow cua user to run pfctl with no password. +# ## pfctl can be restricted to only list command "pfctl -s info". +# use_sudo = false + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# instance_id = "" # REQUIRED +# ## An array of addresses to gather stats about. 
Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remote host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# instance_id = "" # REQUIRED +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. +# # count = 1 +# +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. +# # ping_interval = 1.0 +# +# ## If set, the time to wait for a ping response in seconds. Operates like +# ## the "-W" option of the ping command. +# # timeout = 1.0 +# +# ## If set, the total ping deadline, in seconds. Operates like the -w option +# ## of the ping command. +# # deadline = 10 +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. +# # interface = "" +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false + + +# # Measure postfix queue statistics +# [[inputs.postfix]] +# instance_id = "" # REQUIRED +# ## Postfix queue directory. If not provided, agent will try to use +# ## 'postconf -h queue_directory' to determine it. +# # queue_directory = "/var/spool/postfix" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# instance_id = "" # REQUIRED +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# instance_id = "" # REQUIRED +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. 
This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# instance_id = "" # REQUIRED +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# ## Systemd unit name +# # systemd_unit = "nginx.service" +# ## CGroup name or path +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Add the PID as a tag instead of as a field. When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# instance_id = "" # REQUIRED +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. +# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# instance_id = "" # REQUIRED +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# instance_id = "" # REQUIRED +# ## an instance id is required +# instance_id = "" +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. 
+# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# # queues = ["circonus"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["circonus"] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# queue_name_include = [] +# queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. +# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# instance_id = "" # REQUIRED +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). +# [[inputs.ras]] +# instance_id = "" # REQUIRED +# ## Optional path to RASDaemon sqlite3 database. +# ## Default: /var/lib/rasdaemon/ras-mc_event.db +# # db_path = "" + + +# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +# [[inputs.redfish]] +# instance_id = "" # REQUIRED +# ## Server url +# address = "https://127.0.0.1:5000" +# +# ## Username, Password for hardware server +# username = "root" +# password = "password123456" +# +# ## ComputerSystemId +# computer_system_id="2M220100SL" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# instance_id = "" # REQUIRED +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] +# +# ## Optional. 
Specify redis commands to retrieve values +# # [[inputs.redis.commands]] +# # command = ["get", "sample-key"] +# # field = "sample-key-value" +# # type = "string" +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# instance_id = "" # REQUIRED +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:28015"] +# ## +# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, +# ## protocol have to be named "rethinkdb2" - it will use 1_0 H. +# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] +# ## +# ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol +# ## have to be named "rethinkdb". +# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# instance_id = "" # REQUIRED +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# instance_id = "" # REQUIRED +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Monitor sensors, requires lm-sensors package +# [[inputs.sensors]] +# instance_id = "" # REQUIRED +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# instance_id = "" # REQUIRED +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the cua user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. 
+# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# instance_id = "" # REQUIRED +# ## Agent addresses to retrieve values from. +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 +# +# ## Agent host tag; the tag used to reference the source host +# # agent_host_tag = "agent_host" +# +# ## SNMP community string. +# # community = "public" +# +# ## Number of retries to attempt. +# # retries = 3 +# +# ## The GETBULK max-repetitions parameter. +# # max_repetitions = 10 +# +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" +# +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. 
+# [[inputs.snmp_legacy]] +# instance_id = "" # REQUIRED +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# [[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read stats from one or more Solr servers or cores +# [[inputs.solr]] +# instance_id = "" # REQUIRED +# ## specify a list of one or more Solr servers +# servers = ["http://localhost:8983"] +# +# ## specify a list of one or more Solr cores (default - all) +# # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# instance_id = "" # REQUIRED +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. 
+# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" +# # servers = [ +# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=circonus;log=1;", +# # ] +# +# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 +# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. +# ## Possible values for database_type are +# ## "AzureSQLDB" +# ## "SQLServer" +# ## "AzureSQLManagedInstance" +# # database_type = "AzureSQLDB" +# +# +# ## Optional parameter, setting this to 2 will use a new version +# ## of the collection queries that break compatibility with the original +# ## dashboards. +# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +# query_version = 2 +# +# ## If you are using AzureDB, setting this to true will gather resource utilization metrics +# # azuredb = false +# +# ## Possible queries +# ## Version 2: +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - DatabaseIO +# ## - ServerProperties +# ## - MemoryClerk +# ## - Schedulers +# ## - SqlRequests +# ## - VolumeSpace +# ## - Cpu +# +# ## Version 1: +# ## - PerformanceCounters +# ## - WaitStatsCategorized +# ## - CPUHistory +# ## - DatabaseIO +# ## - DatabaseSize +# ## - DatabaseStats +# ## - DatabaseProperties +# ## - MemoryClerk +# ## - VolumeSpace +# ## - PerformanceMetrics +# +# +# ## Queries enabled by default for specific Database Type +# ## database_type = AzureSQLDB +# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO +# +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. +# exclude_query = [ 'Schedulers' , 'SqlRequests'] + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# instance_id = "" # REQUIRED +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. +# metric_type_prefix_include = [ +# "compute.googleapis.com/", +# ] +# +# ## Exclude timeseries that start with the given metric type. +# # metric_type_prefix_exclude = [] +# +# ## Many metrics are updated once per minute; it is recommended to override +# ## the agent level interval with a value of 1m or greater. +# interval = "1m" +# +# ## Maximum number of API calls to make per second. The quota for accounts +# ## varies, it can be viewed on the API dashboard: +# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits +# # rate_limit = 14 +# +# ## The delay and window options control the number of points selected on +# ## each gather. When set, metrics are gathered between: +# ## start: now() - delay - window +# ## end: now() - delay +# # +# ## Collection delay; if set too low metrics may not yet be available. +# # delay = "5m" +# # +# ## If unset, the window will start at 1m and be updated dynamically to span +# ## the time between calls (approximately the length of the plugin interval). +# # window = "1m" +# +# ## TTL for cached list of metric types. This is the maximum amount of time +# ## it may take to discover new metrics. 
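+# ## (Illustrative only: a shorter value such as cache_ttl = "10m" would surface
+# ## newly created metric types sooner, at the cost of refreshing the cached list
+# ## more often.)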
+# # cache_ttl = "1h" +# +# ## If true, raw bucket counts are collected for distribution value types. +# ## For a more lightweight collection, you may wish to disable and use +# ## distribution_aggregation_aligners instead. +# # gather_raw_distribution_buckets = true +# +# ## Aggregate functions to be used for metrics whose value type is +# ## distribution. These aggregate values are recorded in in addition to raw +# ## bucket counts; if they are enabled. +# ## +# ## For a list of aligner strings see: +# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner +# # distribution_aggregation_aligners = [ +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", +# # ] +# +# ## Filters can be added to reduce the number of time series matched. All +# ## functions are supported: starts_with, ends_with, has_substring, and +# ## one_of. Only the '=' operator is supported. +# ## +# ## The logical operators when combining filters are defined statically using +# ## the following values: +# ## filter ::= {AND } +# ## resource_labels ::= {OR } +# ## metric_labels ::= {OR } +# ## +# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters +# # +# ## Resource labels refine the time series selection with the following expression: +# ## resource.labels. = +# # [[inputs.stackdriver.filter.resource_labels]] +# # key = "instance_name" +# # value = 'starts_with("localhost")' +# # +# ## Metric labels refine the time series selection with the following expression: +# ## metric.labels. = +# # [[inputs.stackdriver.filter.metric_labels]] +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' + + +# # Get synproxy counter statistics from procfs +# [[inputs.synproxy]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Sysstat metrics collector +# [[inputs.sysstat]] +# instance_id = "" # REQUIRED +# ## Path to the sadc command. +# # +# ## Common Defaults: +# ## Debian/Ubuntu: /usr/lib/sysstat/sadc +# ## Arch: /usr/lib/sa/sadc +# ## RHEL/CentOS: /usr/lib64/sa/sadc +# sadc_path = "/usr/lib/sa/sadc" # required +# +# ## Path to the sadf command, if it is not in PATH +# # sadf_path = "/usr/bin/sadf" +# +# ## Activities is a list of activities, that are passed as argument to the +# ## sadc collector utility (e.g: DISK, SNMP etc...) +# ## The more activities that are added, the more data is collected. +# # activities = ["DISK"] +# +# ## Group metrics to measurements. +# ## +# ## If group is false each metric will be prefixed with a description +# ## and represents itself a measurement. +# ## +# ## If Group is true, corresponding metrics are grouped to a single measurement. +# # group = true +# +# ## Options for the sadf command. The values on the left represent the sadf +# ## options and the values on the right their description (which are used for +# ## grouping and prefixing metrics). +# ## +# ## Run 'sar -h' or 'man sar' to find out the supported options for your +# ## sysstat version. +# [inputs.sysstat.options] +# -C = "cpu" +# -B = "paging" +# -b = "io" +# -d = "disk" # requires DISK activity +# "-n ALL" = "network" +# "-P ALL" = "per_cpu" +# -q = "queue" +# -R = "mem" +# -r = "mem_util" +# -S = "swap_util" +# -u = "cpu_util" +# -v = "inode" +# -W = "swap" +# -w = "task" +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity +# +# ## Device tags can be used to add additional tags for devices. 
+# ## For example the configuration below adds a tag vg with value rootvg for +# ## all metrics with sda devices. +# # [[inputs.sysstat.device_tags.sda]] +# # vg = "rootvg" + + +# # Gather systemd units state +# [[inputs.systemd_units]] +# instance_id = "" # REQUIRED +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# instance_id = "" # REQUIRED +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# instance_id = "" # REQUIRED +# # no other configuration options + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# instance_id = "" # REQUIRED +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.cer" +# # tls_key = "/etc/circonus-unified-agent/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. +# [[inputs.tomcat]] +# instance_id = "" # REQUIRED +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# instance_id = "" # REQUIRED +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# instance_id = "" # REQUIRED +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# instance_id = "" # REQUIRED +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. 
It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# instance_id = "" # REQUIRED +# ## List with urls of uWSGI Stats servers. URL must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # A plugin to collect stats from Varnish HTTP Cache +# [[inputs.varnish]] +# instance_id = "" # REQUIRED +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## By default, agent gathers stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] +# +# ## Optional name for the varnish instance (or working directory) to query +# ## Usually append after -n in varnish cli +# # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# instance_id = "" # REQUIRED +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# instance_id = "" # REQUIRED +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# instance_id = "" # REQUIRED +# ## List certificate sources +# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools +# [[inputs.zfs]] +# instance_id = "" # REQUIRED +# ## an instance id is required +# instance_id = "" +# ## By default, gather zpool stats +# poolMetrics = true +# +# # ATTENTION LINUX USERS: +# # Because circonus-unified-agent normally runs as an unprivileged user, it may not be +# # able to run "zpool {status,list}" without root privileges, due to the +# # permissions on /dev/zfs. +# # This was addressed in ZFSonLinux 0.7.0 and later. +# # See https://github.com/zfsonlinux/zfs/issues/362 for a potential workaround +# # if your distribution does not support unprivileged access to /dev/zfs. +# +# ## ZFS kstat path. 
Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, agent gathers all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# instance_id = "" # REQUIRED +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Intel Resource Director Technology plugin +# [[inputs.IntelRDT]] +# instance_id = "" # REQUIRED +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] + + +# # AMQP consumer plugin +# [[inputs.amqp_consumer]] +# instance_id = "" # REQUIRED +# ## Broker to consume from. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to consume from. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Name of the exchange to declare. If unset, no exchange will be declared. +# exchange = "circonus" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". 
+# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## AMQP queue name. +# queue = "circonus" +# +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# # prefetch_count = 50 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# instance_id = "" # REQUIRED +# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the +# ## jolokia2 plugin instead. +# ## +# ## see https://github.com/circonus-labs/circonus-unified-agent/tree/master/plugins/inputs/jolokia2 +# +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# instance_id = "" # REQUIRED +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. +# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS; grpc transport only. 
+# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. +# # tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + + +# # Read metrics from one or many ClickHouse servers +# [[inputs.clickhouse]] +# instance_id = "" # REQUIRED +# ## Username for authorization on ClickHouse server +# ## example: username = "default"" +# username = "default" +# +# ## Password for authorization on ClickHouse server +# ## example: password = "super_secret" +# +# ## HTTP(s) timeout while getting metrics values +# ## The timeout includes connection time, any redirects, and reading the response body. +# ## example: timeout = 1s +# # timeout = 5s +# +# ## List of servers for metrics scraping +# ## metrics scrape via HTTP(s) clickhouse interface +# ## https://clickhouse.tech/docs/en/interfaces/http/ +# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] +# servers = ["http://127.0.0.1:8123"] +# +# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster +# ## with using same "user:password" described in "user" and "password" parameters +# ## and get this server hostname list from "system.clusters" table +# ## see +# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters +# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers +# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ +# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables +# ## example: auto_discovery = false +# # auto_discovery = true +# +# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" +# ## when this filter present then "WHERE cluster IN (...)" filter will apply +# ## please use only full cluster names here, regexp and glob filters is not allowed +# ## for "/etc/clickhouse-server/config.d/remote.xml" +# ## +# ## +# ## +# ## +# ## clickhouse-ru-1.local9000 +# ## clickhouse-ru-2.local9000 +# ## +# ## +# ## clickhouse-eu-1.local9000 +# ## clickhouse-eu-2.local9000 +# ## +# ## +# ## +# ## +# ## +# ## +# ## example: cluster_include = ["my-own-cluster"] +# # cluster_include = [] +# +# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" +# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply +# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] +# # cluster_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# instance_id = "" # REQUIRED +# ## Required. 
Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, the agent will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. 
If true, agent will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] +# instance_id = "" # REQUIRED +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. This should be set to a value +# ## large enough that you can send at least 'metric_batch_size' number of messages within the +# ## duration. +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. +# # add_meta = false +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# instance_id = "" # REQUIRED +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. 
+# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# instance_id = "" # REQUIRED +# ## The default behavior is to create a new Event Hub client from environment variables. +# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "circonus-unified-agent" is used by default +# # user_agent = "circonus-unified-agent" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. 
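+# ## Illustrative example (the property name is an assumption, not a default): if an
+# ## enrichment stamps a "deviceType" property onto every event, then
+# ## application_property_tags = ["deviceType"] turns it into a tag on each parsed metric,
+# ## while listing it under application_property_fields would store it as a field instead.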
+# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# instance_id = "" # REQUIRED +# ## Program to run as daemon +# command = ["smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # gNMI telemetry input plugin +# [[inputs.gnmi]] +# instance_id = "" # REQUIRED +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructure inside it that should be subscribed to (similar to an 
XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# instance_id = "" # REQUIRED +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/circonus-unified-agent/cert.pem" +# tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# instance_id = "" # REQUIRED +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# # path = "/circonus" +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. 
+# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Optional setting to map http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# instance_id = "" # REQUIRED +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## If set the retention policy specified in the write query will be added as +# ## the value of this tag name. +# # retention_policy_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/circonus-unified-agent/cert.pem" +# tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Accept metrics over InfluxDB 2.x HTTP API +# [[inputs.influxdb_v2_listener]] +# instance_id = "" # REQUIRED +# ## Address and port to host InfluxDB listener on +# ## (Double check the port. Could be 9999 if using OSS Beta) +# service_address = ":8086" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# # max_body_size = "32MiB" +# +# ## Optional tag to determine the bucket. +# ## If the write has a bucket in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # bucket_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Optional token to accept for HTTP authentication. +# ## You probably want to make sure you have TLS configured above for this. 
+# # token = "some-long-shared-secret-token" + + +# # Read JTI OpenConfig Telemetry from listed sensors +# [[inputs.jti_openconfig_telemetry]] +# instance_id = "" # REQUIRED +# ## List of device addresses to collect telemetry from +# servers = ["localhost:1883"] +# +# ## Authentication details. Username and password are must if device expects +# ## authentication. Client ID must be unique when connecting from multiple instances +# ## of the agent to the same device +# username = "user" +# password = "pass" +# client_id = "circonus" +# +# ## Frequency to get data +# sample_frequency = "1000ms" +# +# ## Sensors to subscribe for +# ## A identifier for each sensor can be provided in path by separating with space +# ## Else sensor path will be used as identifier +# ## When identifier is used, we can provide a list of space separated sensors. +# ## A single subscription will be created with all these sensors and data will +# ## be saved to measurement with this identifier name +# sensors = [ +# "/interfaces/", +# "collection /components/ /lldp", +# ] +# +# ## We allow specifying sensor group level reporting rate. To do this, specify the +# ## reporting rate in Duration at the beginning of sensor paths / collection +# ## name. For entries without reporting rate, we use configured sample frequency +# sensors = [ +# "1000ms customReporting /interfaces /lldp", +# "2000ms collection /components", +# "/interfaces", +# ] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. +# ## Failed streams/calls will not be retried if 0 is provided +# retry_delay = "1000ms" +# +# ## To treat all string values as tags, set this to true +# str_as_tags = false + + +# # Read metrics from Kafka topics +# [[inputs.kafka_consumer]] +# instance_id = "" # REQUIRED +# ## Kafka brokers. +# brokers = ["localhost:9092"] +# +# ## Topics to consume. +# topics = ["circonus"] +# +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Circonus" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## SASL authentication credentials. These settings should typically be used +# ## with TLS encryption enabled using the "enable_tls" option. +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Name of the consumer group. +# # consumer_group = "circonus_metrics_consumers" +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". 
+# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer_legacy]] +# instance_id = "" # REQUIRED +# ## topic(s) to consume +# topics = ["circonus"] +# +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# +# ## the name of the consumer group +# consumer_group = "circonus_metrics_consumers" +# +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 65536 + + +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# instance_id = "" # REQUIRED +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting the agent. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. 
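+# ##
+# ## Worked example (numbers are illustrative): with ~10 metrics per Kinesis record and
+# ## an output metric_batch_size of 1000, max_undelivered_messages = 100 allows
+# ## 100 * 10 = 1000 unwritten metrics in flight, i.e. exactly one full output batch.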
+# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# instance_id = "" # REQUIRED +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# instance_id = "" # REQUIRED +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## Read files that currently exist from the beginning. Files that are created +# ## while the agent is running (and that match the "files" globs) will always +# ## be read from the beginning. +# from_beginning = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Parse logstash-style "grok" patterns: +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. +# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' +# +# ## Timezone allows you to provide an override for timestamps that +# ## don't already include an offset +# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs +# ## +# ## Default: "" which renders UTC +# ## Options are as follows: +# ## 1. Local -- interpret based on machine localtime +# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones +# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# instance_id = "" # REQUIRED +# ## Broker URLs for the MQTT server or cluster. To connect to multiple +# ## clusters or standalone servers, use a seperate plugin instance. +# ## example: servers = ["tcp://localhost:1883"] +# ## servers = ["ssl://localhost:1883"] +# ## servers = ["ws://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] +# +# ## Topics that will be subscribed to. +# topics = [ +# "circonus/host01/cpu", +# "circonus/+/mem", +# "sensors/#", +# ] +# +# ## The message topic will be stored in a tag specified by this value. 
If set +# ## to the empty string no topic tag will be created. +# # topic_tag = "topic" +# +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identify +# ## the client. To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. +# # username = "circonus" +# # password = "metricsmetricsmetricsmetrics" +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from NATS subject(s) +# [[inputs.nats_consumer]] +# instance_id = "" # REQUIRED +# ## urls of NATS servers +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["circonus"] +# +# ## name a queue group +# queue_group = "circonus_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/circonus-unified-agent/nats.creds" +# +# ## Use Transport Layer Security +# # secure = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/circonus-unified-agent/ca.pem" +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Sets the limits for pending msgs and bytes for each subscription +# ## These shouldn't need to be adjusted except in very high throughput scenarios +# # pending_message_limit = 65536 +# # pending_bytes_limit = 67108864 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. 
+# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read NSQ topic for metrics. +# [[inputs.nsq_consumer]] +# instance_id = "" # REQUIRED +# ## Server option still works but is deprecated, we just prepend it to the nsqd array. +# # server = "localhost:4150" +# +# ## An array representing the NSQD TCP HTTP Endpoints +# nsqd = ["localhost:4150"] +# +# ## An array representing the NSQLookupd HTTP Endpoints +# nsqlookupd = ["localhost:4161"] +# topic = "circonus" +# channel = "consumer" +# max_in_flight = 100 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# instance_id = "" # REQUIRED +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# instance_id = "" # REQUIRED +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to explicitly ignore. 
If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. +# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# instance_id = "" # REQUIRED +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# +# ## connection configuration. +# ## maxlifetime - specify the maximum lifetime of a connection. +# ## default is forever (0s) +# max_lifetime = "0s" +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # outputaddress = "db01" +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by commas) +# ## The optional "measurement" value can be used to override the default +# ## output measurement name ("postgresql"). +# ## +# ## The script option can be used to specify the .sql file path. +# ## If script and sqlquery options specified at same time, sqlquery will be used +# ## +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (comma separated) +# ## measurement string +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# measurement="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="postgresql.stats" + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# instance_id = "" # REQUIRED +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Metric version controls the mapping from Prometheus metrics into +# ## internal metrics. 
When using the prometheus_client output, use the same +# ## value in both plugins to ensure metrics are round-tripped without +# ## modification. +# ## +# ## example: metric_version = 1; deprecated in 1.13 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "scrapeUrl" +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to 'https' & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. ('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# instance_id = "" # REQUIRED +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# instance_id = "" # REQUIRED +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## Timeout running snmptranslate command +# # timeout = "5s" +# ## Snmp version, defaults to 2c +# # version = "2c" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA" or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". 
+# # sec_level = "authNoPriv" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" + + +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# instance_id = "" # REQUIRED +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/circonus.sock" +# # service_address = "unixgram:///tmp/circonus.sock" +# +# ## Change the file mode bits on unix sockets. These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Read timeout. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # read_timeout = "30s" +# +# ## Optional TLS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# ## Enables client authentication if set. +# # tls_allowed_cacerts = ["/etc/circonus-unified-agent/clientca.pem"] +# +# ## Maximum socket buffer size (in bytes when no unit specified). +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = "64KiB" +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" + + +# # Statsd UDP/TCP Server +# [[inputs.statsd]] +# instance_id = "" # REQUIRED +# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) +# protocol = "udp" +# +# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) +# max_tcp_connections = 250 +# +# ## Enable TCP keep alive probes (default=false) +# tcp_keep_alive = false +# +# ## Specifies the keep-alive period for an active network connection. +# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. +# ## Defaults to the OS configuration. +# # tcp_keep_alive_period = "2h" +# +# ## Address and port to host UDP listener on +# service_address = ":8125" +# +# ## The following configuration options control when the agent clears its cache +# ## of previous values. If set to false, then the agent will only clear its +# ## cache when the daemon is restarted. 
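+# ## For example, with delete_gauges = false a gauge that stops receiving updates keeps
+# ## reporting its last cached value on every interval until the agent restarts; with the
+# ## default of true it is cleared each interval and only reappears when new data arrives.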
+# ## Reset gauges every interval (default=true) +# delete_gauges = true +# ## Reset counters every interval (default=true) +# delete_counters = true +# ## Reset sets every interval (default=true) +# delete_sets = true +# ## Reset timings & histograms every interval (default=true) +# delete_timings = true +# +# ## Percentiles to calculate for timing & histogram stats +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] +# +# ## separator to use between elements of a statsd metric +# metric_separator = "_" +# +# ## Parses tags in the datadog statsd format +# ## http://docs.datadoghq.com/guides/dogstatsd/ +# parse_data_dog_tags = false +# +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# +# ## Statsd data translation templates, more info can be read here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/TEMPLATE_PATTERN.md +# # templates = [ +# # "cpu.* measurement*" +# # ] +# +# ## Number of UDP messages allowed to queue up, once filled, +# ## the statsd server will start dropping packets +# allowed_pending_messages = 10000 +# +# ## Number of timing/histogram values to track per-measurement in the +# ## calculation of percentiles. Raising this limit increases the accuracy +# ## of percentiles but also increases the memory usage and cpu time. +# percentile_limit = 1000 + + +# # Suricata stats plugin +# [[inputs.suricata]] +# instance_id = "" # REQUIRED +# ## Data sink for Suricata stats log +# # This is expected to be a filename of a +# # unix socket to be created for listening. +# source = "/var/run/suricata-stats.sock" +# +# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" +# # becomes "detect_alert" when delimiter is "_". +# delimiter = "_" + + +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 +# [[inputs.syslog]] +# instance_id = "" # REQUIRED +# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 +# ## Protocol, address and port to host the syslog receiver. +# ## If no host is specified, then localhost is used. +# ## If no port is specified, 6514 is used (RFC5425#section-4.1). +# server = "tcp://:6514" +# +# ## TLS Config +# # tls_allowed_cacerts = ["/etc/circonus-unified-agent/ca.pem"] +# # tls_cert = "/etc/circonus-unified-agent/cert.pem" +# # tls_key = "/etc/circonus-unified-agent/key.pem" +# +# ## Period between keep alive probes. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# ## Only applies to stream sockets (e.g. TCP). +# # keep_alive_period = "5m" +# +# ## Maximum number of concurrent connections (default = 0). +# ## 0 means unlimited. +# ## Only applies to stream sockets (e.g. TCP). +# # max_connections = 1024 +# +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). +# ## 0 means unlimited. +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-transparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## Whether to parse in best effort mode or not (default = false). 
+# ## By default best effort parsing is off. +# # best_effort = false +# +# ## Character to prepend to SD-PARAMs (default = "_"). +# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. +# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] +# ## For each combination a field is created. +# ## Its name is created concatenating identifier, sdparam_separator, and parameter name. +# # sdparam_separator = "_" + + +# # Parse the new lines appended to a file +# [[inputs.tail]] +# instance_id = "" # REQUIRED +# ## File names or a pattern to tail. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/mymetrics.out"] +# +# ## Read file from beginning. +# # from_beginning = false +# +# ## Whether file is a named pipe +# # pipe = false +# +# ## Method used to watch for file updates. Can be either "inotify" or "poll". +# # watch_method = "inotify" +# +# ## Maximum lines of the file to process that have not yet be written by the +# ## output. For best throughput set based on the number of metrics on each +# ## line and the size of the output's metric_batch_size. +# # max_undelivered_lines = 1000 +# +# ## Character encoding to use when interpreting the file contents. Invalid +# ## characters are replaced using the unicode replacement character. When set +# ## to the empty string the data is not decoded to text. +# ## ex: character_encoding = "utf-8" +# ## character_encoding = "utf-16le" +# ## character_encoding = "utf-16be" +# ## character_encoding = "" +# # character_encoding = "" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/circonus-labs/circonus-unified-agent/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## multiline parser/codec +# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html +# #[inputs.tail.multiline] +# ## The pattern should be a regexp which matches what you believe to be an +# ## indicator that the field is part of an event consisting of multiple lines of log data. +# #pattern = "^\s" +# +# ## This field must be either "previous" or "next". +# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, +# ## whereas "next" indicates that the line belongs to the next one. +# #match_which_line = "previous" +# +# ## The invert_match field can be true or false (defaults to false). +# ## If true, a message not matching the pattern will constitute a match of the multiline +# ## filter and the what will be applied. (vice-versa is also true) +# #invert_match = false +# +# ## After the specified timeout, this plugin sends a multiline event even if no new pattern +# ## is found to start a new event. The default timeout is 5s. +# #timeout = 5s + + +# # Read metrics from VMware vCenter +# [[inputs.vsphere]] +# instance_id = "" # REQUIRED +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. 
+# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6"
+# # ip_addresses = ["ipv6", "ipv4" ]
+#
+# # host_metric_exclude = [] ## Nothing excluded by default
+# # host_instances = true ## true by default
+#
+#
+# ## Clusters
+# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # cluster_exclude = [] # Inventory paths to exclude
+# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
+# # cluster_metric_exclude = [] ## Nothing excluded by default
+# # cluster_instances = false ## false by default
+#
+# ## Datastores
+# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
+# # datastore_exclude = [] # Inventory paths to exclude
+# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
+# # datastore_metric_exclude = [] ## Nothing excluded by default
+# # datastore_instances = false ## false by default
+#
+# ## Datacenters
+# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
+# # datacenter_exclude = [] # Inventory paths to exclude
+# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
+# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
+# # datacenter_instances = false ## false by default
+#
+# ## Plugin Settings
+# ## separator character to use for measurement and field names (default: "_")
+# # separator = "_"
+#
+# ## number of objects to retrieve per query for realtime resources (vms and hosts)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_objects = 256
+#
+# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_metrics = 256
+#
+# ## number of go routines to use for collection and discovery of objects and metrics
+# # collect_concurrency = 1
+# # discover_concurrency = 1
+#
+# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
+# # object_discovery_interval = "300s"
+#
+# ## timeout applies to any of the api request made to vcenter
+# # timeout = "60s"
+#
+# ## When set to true, all samples are sent as integers. Normally all
+# ## samples from vCenter, with the exception of percentages, are integer
+# ## values, but under some conditions, some averaging takes place internally in
+# ## the plugin. Setting this flag to "false" will send values as floats to
+# ## preserve the full precision when averaging takes place.
+# # use_int_samples = true
+#
+# ## Custom attributes from vCenter can be very useful for queries in order to slice the
+# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled
+# ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+# ## to select the attributes you want to include.
+# # custom_attribute_include = []
+# # custom_attribute_exclude = ["*"]
+#
+# ## Optional SSL Config
+# # ssl_ca = "/path/to/cafile"
+# # ssl_cert = "/path/to/certfile"
+# # ssl_key = "/path/to/keyfile"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+#   instance_id = "" # REQUIRED
+#   ## Address and port to host Webhook listener on
+#   service_address = ":1619"
+#
+#   [inputs.webhooks.filestack]
+#     path = "/filestack"
+#
+#   [inputs.webhooks.github]
+#     path = "/github"
+#     # secret = ""
+#
+#   [inputs.webhooks.mandrill]
+#     path = "/mandrill"
+#
+#   [inputs.webhooks.rollbar]
+#     path = "/rollbar"
+#
+#   [inputs.webhooks.papertrail]
+#     path = "/papertrail"
+#
+#   [inputs.webhooks.particle]
+#     path = "/particle"
+
+
+# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
+# [[inputs.zipkin]]
+#   instance_id = "" # REQUIRED
+#   # path = "/api/v1/spans" # URL path for span data
+#   # port = 9411 # Port on which the agent listens
diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md
new file mode 100644
index 00000000..894954bf
--- /dev/null
+++ b/plugins/parsers/json_v2/README.md
@@ -0,0 +1,268 @@
+# JSON Parser Version 2 Plugin
+
+This parser takes valid JSON input and turns it into line protocol. The
+supported query syntax is [GJSON Path
+Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md); you can test
+your GJSON paths in the playground at [gjson.dev](https://gjson.dev). You can
+find multiple examples under the `testdata` folder.
+
+## Configuration
+
+```toml
+[[inputs.file]]
+    files = []
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        measurement_name = "" # A string that will become the new measurement name
+        measurement_name_path = "" # A string with valid GJSON path syntax, will override measurement_name
+        timestamp_path = "" # A string with valid GJSON path syntax to a valid timestamp (single value)
+        timestamp_format = "" # A string with a valid timestamp format (see below for possible values)
+        timestamp_timezone = "" # A string with a valid timezone (see below for possible values)
+        [[inputs.file.json_v2.tag]]
+            path = "" # A string with valid GJSON path syntax to a non-array/non-object value
+            rename = "new name" # A string with a new name for the tag key
+            ## Setting optional to true will suppress errors if the configured Path doesn't match the JSON
+            optional = false
+        [[inputs.file.json_v2.field]]
+            path = "" # A string with valid GJSON path syntax to a non-array/non-object value
+            rename = "new name" # A string with a new name for the field key
+            type = "int" # A string specifying the type (int,uint,float,string,bool)
+            ## Setting optional to true will suppress errors if the configured Path doesn't match the JSON
+            optional = false
+        [[inputs.file.json_v2.object]]
+            path = "" # A string with valid GJSON path syntax, can include arrays and objects
+
+            ## Setting optional to true will suppress errors if the configured Path doesn't match the JSON
+            optional = false
+
+            ## Configuration to define what JSON keys should be used as timestamps ##
+            timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp
+            timestamp_format = "" # A string with a valid timestamp format (see below for possible values)
+            timestamp_timezone = "" # A string with a valid timezone (see below for
possible values)
+
+            ### Configuration to define what JSON keys should be included and how (field/tag) ###
+            tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field; a key added to this list doesn't also have to be defined in the included_keys list
+            included_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be the only keys included in the result
+            excluded_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that shouldn't be included in the result
+            # When a tag/field sub-table is defined, those sub-tables (plus any keys defined in the included_keys list) are the only fields/tags gathered.
+            # If a resulting value isn't contained in the object/array returned by the root object path, it won't be included.
+            # You can define as many tag/field sub-tables as you want.
+            [[inputs.file.json_v2.object.tag]]
+                path = "" # A string with valid GJSON path syntax, can include arrays and objects
+                rename = "new name" # A string with a new name for the tag key
+            [[inputs.file.json_v2.object.field]]
+                path = "" # A string with valid GJSON path syntax, can include arrays and objects
+                rename = "new name" # A string with a new name for the field key
+                type = "int" # A string specifying the type (int,uint,float,string,bool)
+
+            ### Configuration to modify the resulting line protocol ###
+            disable_prepend_keys = false # Set to true to stop prepending the parent keys to nested keys
+            [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) to a new name for the resulting field/tag key
+                key = "new name"
+            [inputs.file.json_v2.object.fields] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) to a type (int,uint,float,string,bool)
+                key = "int"
+```
+
+You configure this parser by describing the line protocol you want: define the
+fields and tags to gather from the input. The configuration is divided into
+config sub-tables called `field`, `tag`, and `object`. The example above shows
+all the configuration keys you can define for each config table. The sections
+that follow define these configuration keys in more detail.
+
+---
+
+### root config options
+
+* **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string.
+* **measurement_name_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a measurement name from the JSON input. The query must return a single data value or it will use the default measurement name. This takes precedence over `measurement_name`.
+* **timestamp_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a timestamp from the JSON input. The query must return a single data value or it will default to the current time.
+* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_path is defined)**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or
+the Go "reference time" which is defined to be the specific time:
+`Mon Jan 2 15:04:05 MST 2006`
+* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_path)**: This option should be set to a
+[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones),
+such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`.
Defaults to `UTC`.
+
+---
+
+### `field` and `tag` config options
+
+`field` and `tag` represent the elements of line protocol. You can use the
+`field` and `tag` config tables to gather a single value or an array of values
+that all share the same type and name. With this you can add a field or tag to
+a line protocol from data stored anywhere in your JSON. If you define the GJSON
+path to return a single value, you will get a single resulting line protocol
+that contains the field/tag. If you define the GJSON path to return an array of
+values, then each field/tag will be put into a separate line protocol (you use
+the # character to retrieve JSON arrays, find examples
+[here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)).
+
+Note that objects are handled separately, therefore if you provide a path that
+returns an object it will be ignored. You will need to use the `object` config
+table to parse objects, because `field` and `tag` don't handle relationships
+between data. Each `field` and `tag` you define is handled as a separate data
+point.
+
+The notable difference between `field` and `tag` is that `tag` values will
+always be type string while `field` can be multiple types. You can define the
+type of `field` to be any type that line protocol supports, which are:
+
+* float
+* int
+* uint
+* string
+* bool
+
+#### **field**
+
+Using this field configuration you can gather non-array/non-object values. Note
+this acts as a global field when used with the `object` configuration: if you
+gather an array of values using `object`, then the field gathered will be added
+to each resulting line protocol without acknowledging its location in the
+original JSON. This is defined in TOML as an array table using double brackets.
+
+* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value
+* **rename (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query.
+* **type (OPTIONAL)**: You can define a string value to set the desired type (float, int, uint, string, bool). If not defined it won't enforce a type and will default to using the original type defined in the JSON (bool, float, or string).
+* **optional (OPTIONAL)**: Setting optional to true will suppress errors if the configured Path doesn't match the JSON. This should be used with caution because it removes the safety net of verifying the provided path. An example case to use this is with the `inputs.mqtt_consumer` plugin when you are expecting multiple JSON files.
+
+#### **tag**
+
+Using this tag configuration you can gather non-array/non-object values. Note
+this acts as a global tag when used with the `object` configuration: if you
+gather an array of values using `object`, then the tag gathered will be added
+to each resulting line protocol without acknowledging its location in the
+original JSON. This is defined in TOML as an array table using double brackets.
+
+* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value
+* **rename (OPTIONAL)**: You can define a string value to set the tag name. If not defined it will use the trailing word from the provided query.
+* **optional (OPTIONAL)**: Setting optional to true will suppress errors if the configured Path doesn't match the JSON. This should be used with caution because it removes the safety net of verifying the provided path.
An example case to use this is with the `inputs.mqtt_consumer` plugin when you are expecting multiple JSON files.
+
+For good examples of using `field` and `tag`, see the `fields_and_tags` and
+`fields_and_tags_complex` example configs under the `testdata` folder.
+
+---
+
+### object
+
+With the configuration section `object`, you can gather values from [JSON
+objects](https://www.w3schools.com/js/js_json_objects.asp). This is defined in
+TOML as an array table using double brackets.
+
+#### The following keys can be set for `object`
+
+* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md)
+* **optional (OPTIONAL)**: Setting optional to true will suppress errors if the configured Path doesn't match the JSON. This should be used with caution because it removes the safety net of verifying the provided path. An example case to use this is with the `inputs.mqtt_consumer` plugin when you are expecting multiple JSON files.
+
+*Keys to define what JSON keys should be used as timestamps:*
+
+* **timestamp_key (OPTIONAL)**: You can define a JSON key (for a nested key, prepend the parent keys with underscores) whose value will be used as the timestamp from the JSON input.
+* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_key is defined)**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or
+the Go "reference time" which is defined to be the specific time:
+`Mon Jan 2 15:04:05 MST 2006`
+* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_key)**: This option should be set to a
+[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones),
+such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC`
+
+*Configuration to define what JSON keys should be included and how (field/tag):*
+
+* **included_keys (OPTIONAL)**: You can define a list of keys that should be the only data included in the line protocol; by default everything is included.
+* **excluded_keys (OPTIONAL)**: You can define JSON keys to be excluded from the line protocol; for a nested key, prepend the parent keys with underscores.
+* **tags (OPTIONAL)**: You can define JSON keys to be set as tags instead of fields; if you define a key that is an array or object then all nested values will become tags.
+* **field (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [field](#field) table you can define, but with two key differences: the path supports arrays and objects, and it is defined under the object table and therefore adheres to how the JSON is structured. Use this if you want the field to be added as it would be if it were in the included_keys list, but selected with GJSON path syntax.
+* **tag (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [tag](#tag) table you can define, but with two key differences: the path supports arrays and objects, and it is defined under the object table and therefore adheres to how the JSON is structured. Use this if you want the tag to be added as it would be if it were in the included_keys list, but selected with GJSON path syntax. An example using these sub-tables follows this list.
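+
+For example, the `multiple_json_input` test under the `testdata` folder uses `tag`
+and `field` sub-tables to select individual values out of each `root.station`
+object. The configuration below is a copy of that test's config with the
+`instance_id` setting omitted for brevity:
+
+```toml
+[[inputs.file]]
+    files = ["./testdata/multiple_json_input/input_1.json", "./testdata/multiple_json_input/input_2.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.object]]
+            path = "root.station"
+            [[inputs.file.json_v2.object.tag]]
+                path = "#.abbr"
+                rename = "from_station"
+            [[inputs.file.json_v2.object.field]]
+                path = "#.etd.0.estimate.0.minutes"
+                rename = "minutes"
+                type = "int"
+            [[inputs.file.json_v2.object.tag]]
+                path = "#.etd.0.abbreviation"
+                rename = "to_station"
+            [[inputs.file.json_v2.object.tag]]
+                path = "#.etd.0.estimate.0.direction"
+```
+
+Expected line protocol (one metric per input file):
+
+```text
+file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North minutes=2i
+file,from_station=POWL,to_station=DALY,etd_estimate_direction=South minutes=6i
+```
+
+Note how the un-renamed `#.etd.0.estimate.0.direction` tag appears as
+`etd_estimate_direction`, with its parent keys prepended.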
+
+*Configuration to modify the resulting line protocol:*
+
+* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent the resulting nested data from containing the parent key prepended to its own key. **NOTE**: duplicate names can overwrite each other when this is enabled.
+* **renames (OPTIONAL, defined in TOML as a table using single brackets)**: A table matching the JSON key with the desired name (as opposed to defaulting to using the key); for nested results, use names that include the prepended parent keys.
+* **fields (OPTIONAL, defined in TOML as a table using single brackets)**: A table matching the JSON key with the desired type (int,uint,float,string,bool); if you define a key that is an array or object then all nested values will become that type.
+
+## Arrays and Objects
+
+The following describes the high-level approach when parsing arrays and objects:
+
+**Array**: Every element in an array is treated as a *separate* line protocol
+
+**Object**: Every key/value in an object is treated as a *single* line protocol
+
+When handling nested arrays and objects, the rules above continue to apply as
+the parser creates line protocol. When an object has multiple arrays as values,
+the arrays will become separate line protocol containing only the non-array
+values from the object. Below you can see an example of this behavior, with an
+input JSON containing an array of book objects that has a nested array of
+characters.
+
+Example JSON:
+
+```json
+{
+    "book": {
+        "title": "The Lord Of The Rings",
+        "chapters": [
+            "A Long-expected Party",
+            "The Shadow of the Past"
+        ],
+        "author": "Tolkien",
+        "characters": [
+            {
+                "name": "Bilbo",
+                "species": "hobbit"
+            },
+            {
+                "name": "Frodo",
+                "species": "hobbit"
+            }
+        ],
+        "random": [
+            1,
+            2
+        ]
+    }
+}
+```
+
+Example configuration:
+
+```toml
+[[inputs.file]]
+    files = ["./testdata/multiple_arrays_in_object/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.object]]
+            path = "book"
+            tags = ["title"]
+            disable_prepend_keys = true
+```
+
+Expected line protocol:
+
+```text
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Frodo",species="hobbit"
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=1
+file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2
+```
+
+You can find more complicated examples under the folder `testdata`.
+
+## Types
+
+For each field you have the option to define the types. The following rules are
+in place for this configuration:
+
+* If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible. If the type can't be converted then the parser will fail.
+* If a type isn't defined, the parser will use the default type defined in the JSON (int, float, string).
+
+The type values you can set (see the example config after this list):
+
+* `int`, bool, floats or strings (with valid numbers) can be converted to an int.
+* `uint`, bool, floats or strings (with valid numbers) can be converted to a uint.
+* `string`, any data can be formatted as a string.
+* `float`, string values (with valid numbers) or integers can be converted to a float.
+* `bool`, the string values "true" or "false" (regardless of capitalization) or the integer values `0` or `1` can be converted to a bool.
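+
+For example, the `fields_and_tags` test under the `testdata` folder forces one
+value to an integer while leaving the others at their JSON-derived types. The
+configuration below is a copy of that test's config with the `instance_id`
+setting omitted for brevity:
+
+```toml
+[[inputs.file]]
+    files = ["./testdata/fields_and_tags/input.json"]
+    data_format = "json_v2"
+    [[inputs.file.json_v2]]
+        [[inputs.file.json_v2.tag]]
+            path = "fields.status"
+        [[inputs.file.json_v2.field]]
+            path = "fields.json.#.duration"
+            rename = "json_duration"
+        [[inputs.file.json_v2.field]]
+            path = "fields.duration"
+            type = "int"
+```
+
+Expected line protocol:
+
+```text
+file,status=200 duration=2i,json_duration=100
+file,status=200 duration=2i,json_duration=60
+```
+
+`duration` is emitted as the integer `2i` because of the explicit `type = "int"`,
+while `json_duration` keeps the float type it has in the JSON.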
diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go new file mode 100644 index 00000000..d5e2e4cf --- /dev/null +++ b/plugins/parsers/json_v2/parser.go @@ -0,0 +1,689 @@ +package json_v2 + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/circonus-labs/circonus-unified-agent/cua" + "github.com/circonus-labs/circonus-unified-agent/internal" + "github.com/circonus-labs/circonus-unified-agent/metric" + + "github.com/tidwall/gjson" +) + +// Parser adheres to the parser interface, contains the parser configuration, and data required to parse JSON +type Parser struct { + // These struct fields are common for a parser + Configs []Config + DefaultTags map[string]string + Log cua.Logger + + // **** The struct fields bellow this comment are used for processing indvidual configs **** + + // measurementName is the the name of the current config used in each line protocol + measurementName string + + // **** Specific for object configuration **** + // subPathResults contains the results of sub-gjson path expressions provided in fields/tags table within object config + subPathResults []PathResult + // iterateObjects dictates if ExpandArray function will handle objects + iterateObjects bool + // objectConfig contains the config for an object, some info is needed while iterating over the gjson results + objectConfig JSONObject +} + +type PathResult struct { + result gjson.Result + tag bool + DataSet +} + +type Config struct { + MeasurementName string `toml:"measurement_name"` // OPTIONAL + MeasurementNamePath string `toml:"measurement_name_path"` // OPTIONAL + TimestampPath string `toml:"timestamp_path"` // OPTIONAL + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + + Fields []DataSet + Tags []DataSet + JSONObjects []JSONObject +} + +type DataSet struct { + Path string `toml:"path"` // REQUIRED + Type string `toml:"type"` // OPTIONAL, can't be set for tags they will always be a string + Rename string `toml:"rename"` + Optional bool `toml:"optional"` // Will suppress errors if there isn't a match with Path +} + +type JSONObject struct { + Path string `toml:"path"` // REQUIRED + Optional bool `toml:"optional"` // Will suppress errors if there isn't a match with Path + TimestampKey string `toml:"timestamp_key"` + TimestampFormat string `toml:"timestamp_format"` // OPTIONAL, but REQUIRED when timestamp_path is defined + TimestampTimezone string `toml:"timestamp_timezone"` // OPTIONAL, but REQUIRES timestamp_path + Renames map[string]string `toml:"renames"` + Fields map[string]string `toml:"fields"` + Tags []string `toml:"tags"` + IncludedKeys []string `toml:"included_keys"` + ExcludedKeys []string `toml:"excluded_keys"` + DisablePrependKeys bool `toml:"disable_prepend_keys"` + FieldPaths []DataSet + TagPaths []DataSet +} + +type MetricNode struct { + ParentIndex int + OutputName string + SetName string + Tag bool + DesiredType string // Can be "int", "uint", "float", "bool", "string" + /* + IncludeCollection is only used when processing objects and is responsible for containing the gjson results + found by the gjson paths provided in the FieldPaths and TagPaths configs. 
+ */ + IncludeCollection *PathResult + + Metric cua.Metric + gjson.Result +} + +func (p *Parser) Parse(input []byte) ([]cua.Metric, error) { + // Only valid JSON is supported + if !gjson.Valid(string(input)) { + return nil, fmt.Errorf("invalid JSON provided, unable to parse") + } + + var metrics []cua.Metric + + for _, c := range p.Configs { + // Measurement name can either be hardcoded, or parsed from the JSON using a GJSON path expression + p.measurementName = c.MeasurementName + if c.MeasurementNamePath != "" { + result := gjson.GetBytes(input, c.MeasurementNamePath) + if !result.IsArray() && !result.IsObject() { + p.measurementName = result.String() + } + } + + // timestamp defaults to current time, or can be parsed from the JSON using a GJSON path expression + timestamp := time.Now() + if c.TimestampPath != "" { + result := gjson.GetBytes(input, c.TimestampPath) + + if result.Type == gjson.Null { + p.Log.Debugf("Message: %s", input) + return nil, fmt.Errorf("The timestamp path %s returned NULL", c.TimestampPath) + } + if !result.IsArray() && !result.IsObject() { + if c.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + + var err error + timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.String(), c.TimestampTimezone) + + if err != nil { + return nil, err + } + } + } + + fields, err := p.processMetric(input, c.Fields, false, timestamp) + if err != nil { + return nil, err + } + + tags, err := p.processMetric(input, c.Tags, true, timestamp) + if err != nil { + return nil, err + } + + objects, err := p.processObjects(input, c.JSONObjects, timestamp) + if err != nil { + return nil, err + } + + metrics = append(metrics, cartesianProduct(tags, fields)...) + + if len(objects) != 0 && len(metrics) != 0 { + metrics = cartesianProduct(objects, metrics) + } else { + metrics = append(metrics, objects...) 
+ } + } + + for k, v := range p.DefaultTags { + for _, t := range metrics { + t.AddTag(k, v) + } + } + + return metrics, nil +} + +// processMetric will iterate over all 'field' or 'tag' configs and create metrics for each +// A field/tag can either be a single value or an array of values, each resulting in its own metric +// For multiple configs, a set of metrics is created from the cartesian product of each separate config +func (p *Parser) processMetric(input []byte, data []DataSet, tag bool, timestamp time.Time) ([]cua.Metric, error) { + if len(data) == 0 { + return nil, nil + } + + p.iterateObjects = false + var metrics [][]cua.Metric + + for _, c := range data { + if c.Path == "" { + return nil, fmt.Errorf("GJSON path is required") + } + result := gjson.GetBytes(input, c.Path) + if skip, err := p.checkResult(result, c.Path, c.Optional); err != nil { + if skip { + continue + } + return nil, err + } + + if result.IsObject() { + p.Log.Debugf("Found object in the path: %s, ignoring it please use 'object' to gather metrics from objects", c.Path) + continue + } + + setName := c.Rename + // Default to the last path word, should be the upper key name + if setName == "" { + s := strings.Split(c.Path, ".") + setName = s[len(s)-1] + } + setName = strings.ReplaceAll(setName, " ", "_") + + mNodeMetric, err := metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + timestamp, + ) + + if err != nil { + return nil, fmt.Errorf("new metric: %w", err) + } + + mNode := MetricNode{ + OutputName: setName, + SetName: setName, + DesiredType: c.Type, + Tag: tag, + Metric: mNodeMetric, + Result: result, + ParentIndex: result.Index, + } + + // Expand all array's and nested arrays into separate metrics + nodes, err := p.expandArray(mNode, timestamp) + if err != nil { + return nil, err + } + + metrics = append(metrics, nodes) + } + + for i := 1; i < len(metrics); i++ { + metrics[i] = cartesianProduct(metrics[i-1], metrics[i]) + } + + if len(metrics) == 0 { + return nil, nil + } + + return metrics[len(metrics)-1], nil +} + +func cartesianProduct(a, b []cua.Metric) []cua.Metric { + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + p := make([]cua.Metric, len(a)*len(b)) + i := 0 + for _, a := range a { + for _, b := range b { + m := a.Copy() + mergeMetric(b, m) + p[i] = m + i++ + } + } + + return p +} + +func mergeMetric(a cua.Metric, m cua.Metric) { + for _, f := range a.FieldList() { + m.AddField(f.Key, f.Value) + } + for _, t := range a.TagList() { + m.AddTag(t.Key, t.Value) + } +} + +// expandArray will recursively create a new MetricNode for each element in a JSON array or single value +func (p *Parser) expandArray(result MetricNode, timestamp time.Time) ([]cua.Metric, error) { + var results []cua.Metric + + if result.IsObject() { + if !p.iterateObjects { + p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + return results, nil + } + r, err := p.combineObject(result, timestamp) + if err != nil { + return nil, err + } + results = append(results, r...) 
+ return results, nil + } + + if result.IsArray() { + var err error + if result.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { + result.IncludeCollection = p.existsInpathResults(result.Index) + } + result.ForEach(func(_, val gjson.Result) bool { + m, _ := metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + timestamp, + ) + if val.IsObject() { + n := result + n.Metric = m + n.Result = val + n.Index = val.Index - result.Index + n.ParentIndex = n.Index + result.ParentIndex + r, err := p.combineObject(n, timestamp) + if err != nil { + return false + } + + results = append(results, r...) + if len(results) != 0 { + for _, newResult := range results { + mergeMetric(result.Metric, newResult) + } + } + return true + } + + mergeMetric(result.Metric, m) + n := result + n.Metric = m + n.Result = val + n.Index = val.Index - result.Index + n.ParentIndex = n.Index + result.ParentIndex + r, err := p.expandArray(n, timestamp) + if err != nil { + return false + } + results = append(results, r...) + return true + }) + if err != nil { + return nil, err + } + } else { + if p.objectConfig.TimestampKey != "" && result.SetName == p.objectConfig.TimestampKey { + if p.objectConfig.TimestampFormat == "" { + err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + return nil, err + } + timestamp, err := internal.ParseTimestamp(p.objectConfig.TimestampFormat, result.String(), p.objectConfig.TimestampTimezone) + if err != nil { + return nil, err + } + result.Metric.SetTime(timestamp) + } else { + switch result.Value().(type) { + case nil: // Ignore JSON values that are set as null + default: + outputName := result.OutputName + desiredType := result.DesiredType + + if len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0 { + var pathResult *PathResult + // When IncludeCollection isn't nil, that means the current result is included in the collection. 
+ if result.IncludeCollection != nil { + pathResult = result.IncludeCollection + } else { + // Verify that the result should be included based on the results of fieldpaths and tag paths + pathResult = p.existsInpathResults(result.ParentIndex) + } + if pathResult == nil { + return results, nil + } + if pathResult.tag { + result.Tag = true + } + if !pathResult.tag { + desiredType = pathResult.Type + } + if pathResult.Rename != "" { + outputName = pathResult.Rename + } + } + + if result.Tag { + desiredType = "string" + } + v, err := p.convertType(result.Result, desiredType, result.SetName) + if err != nil { + return nil, err + } + if result.Tag { + result.Metric.AddTag(outputName, v.(string)) + } else { + result.Metric.AddField(outputName, v) + } + } + } + + results = append(results, result.Metric) + } + + return results, nil +} + +func (p *Parser) existsInpathResults(index int) *PathResult { + for _, f := range p.subPathResults { + if f.result.Index == index { + return &f + } + + // Indexes will be populated with all the elements that match on a `#(...)#` query + for _, i := range f.result.Indexes { + if i == index { + return &f + } + } + } + return nil +} + +// processObjects will iterate over all 'object' configs and create metrics for each +func (p *Parser) processObjects(input []byte, objects []JSONObject, timestamp time.Time) ([]cua.Metric, error) { + p.iterateObjects = true + var t []cua.Metric + for _, c := range objects { + p.objectConfig = c + + if c.Path == "" { + return nil, fmt.Errorf("GJSON path is required") + } + + result := gjson.GetBytes(input, c.Path) + if skip, err := p.checkResult(result, c.Path, c.Optional); err != nil { + if skip { + continue + } + return nil, err + } + + scopedJSON := []byte(result.Raw) + for _, f := range c.FieldPaths { + var r PathResult + r.result = gjson.GetBytes(scopedJSON, f.Path) + if skip, err := p.checkResult(r.result, f.Path, f.Optional); err != nil { + if skip { + continue + } + return nil, err + } + r.DataSet = f + p.subPathResults = append(p.subPathResults, r) + } + + for _, f := range c.TagPaths { + var r PathResult + r.result = gjson.GetBytes(scopedJSON, f.Path) + if skip, err := p.checkResult(r.result, f.Path, f.Optional); err != nil { + if skip { + continue + } + return nil, err + } + r.DataSet = f + r.tag = true + p.subPathResults = append(p.subPathResults, r) + } + + rootObjectMetric, _ := metric.New( + p.measurementName, + map[string]string{}, + map[string]interface{}{}, + timestamp, + ) + + rootObject := MetricNode{ + Metric: rootObjectMetric, + Result: result, + ParentIndex: 0, + } + + metrics, err := p.expandArray(rootObject, timestamp) + if err != nil { + return nil, err + } + t = append(t, metrics...) 
+ } + + return t, nil +} + +// combineObject will add all fields/tags to a single metric +// If the object has multiple array's as elements it won't comine those, they will remain separate metrics +func (p *Parser) combineObject(result MetricNode, timestamp time.Time) ([]cua.Metric, error) { + var results []cua.Metric + if result.IsArray() || result.IsObject() { + var err error + result.ForEach(func(key, val gjson.Result) bool { + // Determine if field/tag set name is configured + var setName string + if result.SetName != "" { + setName = result.SetName + "_" + strings.ReplaceAll(key.String(), " ", "_") + } else { + setName = strings.ReplaceAll(key.String(), " ", "_") + } + + if p.isExcluded(setName) || !p.isIncluded(setName, val) { + return true + } + + var outputName string + if p.objectConfig.DisablePrependKeys { + outputName = strings.ReplaceAll(key.String(), " ", "_") + } else { + outputName = setName + } + for k, n := range p.objectConfig.Renames { + if k == setName { + outputName = n + break + } + } + + arrayNode := result + arrayNode.ParentIndex += val.Index + arrayNode.OutputName = outputName + arrayNode.SetName = setName + arrayNode.Result = val + + for k, t := range p.objectConfig.Fields { + if setName == k { + arrayNode.DesiredType = t + break + } + } + + tag := false + for _, t := range p.objectConfig.Tags { + if setName == t { + tag = true + break + } + } + + arrayNode.Tag = tag + + if val.IsObject() { + results, err = p.combineObject(arrayNode, timestamp) + if err != nil { + return false + } + } else { + arrayNode.Index -= result.Index + arrayNode.ParentIndex -= result.Index + r, err := p.expandArray(arrayNode, timestamp) + if err != nil { + return false + } + results = cartesianProduct(r, results) + } + + return true + }) + + if err != nil { + return nil, err + } + } + return results, nil +} + +func (p *Parser) isIncluded(key string, val gjson.Result) bool { + if len(p.objectConfig.IncludedKeys) == 0 { + return true + } + // automatically adds tags to included_keys so it does NOT have to be repeated in the config + allKeys := append(p.objectConfig.IncludedKeys, p.objectConfig.Tags...) 
+ for _, i := range allKeys { + if i == key { + return true + } + if val.IsArray() || val.IsObject() { + // Check if the included key is a sub element + if strings.HasPrefix(i, key) { + return true + } + } + } + return false +} + +func (p *Parser) isExcluded(key string) bool { + for _, i := range p.objectConfig.ExcludedKeys { + if i == key { + return true + } + } + return false +} + +func (p *Parser) ParseLine(line string) (cua.Metric, error) { + return nil, fmt.Errorf("ParseLine is designed for parsing influx line protocol, therefore not implemented for parsing JSON") +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +// convertType will convert the value parsed from the input JSON to the specified type in the config +func (p *Parser) convertType(input gjson.Result, desiredType string, name string) (interface{}, error) { + switch inputType := input.Value().(type) { + case string: + switch desiredType { + case "uint": + r, err := strconv.ParseUint(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) + } + return r, nil + case "int": + r, err := strconv.ParseInt(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) + } + return r, nil + case "float": + r, err := strconv.ParseFloat(inputType, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + } + return r, nil + case "bool": + r, err := strconv.ParseBool(inputType) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + } + return r, nil + } + case bool: + switch desiredType { + case "string": + return strconv.FormatBool(inputType), nil + case "int": + if inputType { + return int64(1), nil + } + + return int64(0), nil + case "uint": + if inputType { + return uint64(1), nil + } + + return uint64(0), nil + } + case float64: + switch desiredType { + case "string": + return fmt.Sprint(inputType), nil + case "int": + return input.Int(), nil + case "uint": + return input.Uint(), nil + case "bool": + if inputType == 0 { + return false, nil + } else if inputType == 1 { + return true, nil + } else { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) + } + } + default: + return nil, fmt.Errorf("unknown format '%T' for field '%s'", inputType, name) + } + + return input.Value(), nil +} + +func (p *Parser) checkResult(result gjson.Result, path string, optional bool) (bool, error) { + if !result.Exists() { + if optional { + // If path is marked as optional don't error if path doesn't return a result + p.Log.Debugf("the path %s doesn't exist", path) + return true, nil + } + + return false, fmt.Errorf("the path %s doesn't exist", path) + } + + return false, nil +} diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go new file mode 100644 index 00000000..878c6ede --- /dev/null +++ b/plugins/parsers/json_v2/parser_test.go @@ -0,0 +1,113 @@ +package json_v2_test + +// import ( +// "bufio" +// "fmt" +// "io/ioutil" +// "os" +// "strings" +// "testing" + +// "github.com/circonus-labs/circonus-unified-agent/config" +// "github.com/circonus-labs/circonus-unified-agent/cua" +// "github.com/circonus-labs/circonus-unified-agent/plugins/inputs" +// "github.com/circonus-labs/circonus-unified-agent/plugins/inputs/file" +// "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/influx" +// 
"github.com/stretchr/testify/require" +// ) + +// func TestMultipleConfigs(t *testing.T) { +// // Get all directories in testdata +// folders, err := ioutil.ReadDir("testdata") +// require.NoError(t, err) +// // Make sure testdata contains data +// require.Greater(t, len(folders), 0) + +// expectedErrors := []struct { +// Name string +// Error string +// }{ +// { +// Name: "wrong_path", +// Error: "wrong", +// }, +// } + +// for _, f := range folders { +// t.Run(f.Name(), func(t *testing.T) { +// // Process the telegraf config file for the test +// buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/circonus-unified-agent.conf", f.Name())) +// require.NoError(t, err) +// inputs.Add("file", func() cua.Input { +// return &file.File{} +// }) +// cfg := config.NewConfig() +// err = cfg.LoadConfigData(buf) +// require.NoError(t, err) + +// for _, input := range cfg.Inputs { +// err = input.Init() +// require.NoError(t, err) +// // err = input.Gather(&acc) +// // If the test has an expected error then require one was received +// var expectedError bool +// for _, e := range expectedErrors { +// if e.Name == f.Name() { +// require.Contains(t, err.Error(), e.Error) +// expectedError = true +// break +// } +// } +// if !expectedError { +// require.NoError(t, err) +// } +// } + +// // Process expected metrics and compare with resulting metrics +// // expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) +// require.NoError(t, err) +// // resultingMetrics := acc.GetTelegrafMetrics() +// // testutil.RequireMetricsEqual(t, expectedOutputs, resultingMetrics, testutil.IgnoreTime()) + +// // Folder with timestamp prefixed will also check for matching timestamps to make sure they are parsed correctly +// // The milliseconds weren't matching, seemed like a rounding difference between the influx parser +// // Compares each metrics times separately and ignores milliseconds +// if strings.HasPrefix(f.Name(), "timestamp") { +// // require.Equal(t, len(expectedOutputs), len(resultingMetrics)) +// // for i, m := range resultingMetrics { +// // require.Equal(t, expectedOutputs[i].Time().Truncate(time.Second), m.Time().Truncate(time.Second)) +// // } +// } +// }) +// } +// } + +// func readMetricFile(path string) ([]cua.Metric, error) { +// var metrics []cua.Metric +// expectedFile, err := os.Open(path) +// if err != nil { +// return metrics, err +// } +// defer expectedFile.Close() + +// parser := influx.NewParser(influx.NewMetricHandler()) +// scanner := bufio.NewScanner(expectedFile) +// for scanner.Scan() { +// line := scanner.Text() +// if line != "" { +// m, err := parser.ParseLine(line) +// // The timezone needs to be UTC to match the timestamp test results +// m.SetTime(m.Time().UTC()) +// if err != nil { +// return nil, fmt.Errorf("unable to parse metric in %q failed: %v", line, err) +// } +// metrics = append(metrics, m) +// } +// } +// err = expectedFile.Close() +// if err != nil { +// return metrics, err +// } + +// return metrics, nil +// } diff --git a/plugins/parsers/json_v2/testdata/10670/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/10670/circonus-unified-agent.conf new file mode 100644 index 00000000..09cd18e3 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/10670/circonus-unified-agent.conf @@ -0,0 +1,26 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/10670/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "weight" + timestamp_format = "unix_ms" + timestamp_path = 
"weight_createdAt" + [[inputs.file.json_v2.field]] + path = "weight_weight" + rename = "weight" + type = "float" + [[inputs.file.json_v2.tag]] + path = "weight_serialNumber" + rename = "serial_number" + [[inputs.file.json_v2.tag]] + path = "weight_ROWKEY" + rename = "imei" + [[inputs.file.json_v2.tag]] + path = "sensor_customer_name" + rename = "customer_name" + [[inputs.file.json_v2.tag]] + path = "sensor_distributor_name" + rename = "distributor_name" diff --git a/plugins/parsers/json_v2/testdata/10670/expected.out b/plugins/parsers/json_v2/testdata/10670/expected.out new file mode 100644 index 00000000..3a6af7b4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/10670/expected.out @@ -0,0 +1 @@ +weight,customer_name=Customer,imei=123,serial_number=AX00 weight=289.799 diff --git a/plugins/parsers/json_v2/testdata/10670/input.json b/plugins/parsers/json_v2/testdata/10670/input.json new file mode 100644 index 00000000..f3d12946 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/10670/input.json @@ -0,0 +1,10 @@ +{ + "weight_ROWKEY": "123", + "weight_serialNumber": "AX00", + "weight_createdAt": 1644708158939, + "weight_weight": 289.799, + "sensor_imei": "123", + "sensor_distributor_name": null, + "sensor_customer_name": "Customer", + "sensor_dist_name": null +} diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/array_of_objects/circonus-unified-agent.conf new file mode 100644 index 00000000..814bdefe --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/circonus-unified-agent.conf @@ -0,0 +1,10 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/array_of_objects/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/expected.out b/plugins/parsers/json_v2/testdata/array_of_objects/expected.out new file mode 100644 index 00000000..75f9e5e4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/expected.out @@ -0,0 +1,2 @@ +file properties_mag=5.17 +file properties_mag=6.2 diff --git a/plugins/parsers/json_v2/testdata/array_of_objects/input.json b/plugins/parsers/json_v2/testdata/array_of_objects/input.json new file mode 100644 index 00000000..6b43061b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/array_of_objects/input.json @@ -0,0 +1,14 @@ +{ + "features": [ + { + "properties": { + "mag": 5.17 + } + }, + { + "properties": { + "mag": 6.2 + } + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/complex_nesting/circonus-unified-agent.conf new file mode 100644 index 00000000..2800ade7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/circonus-unified-agent.conf @@ -0,0 +1,10 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/complex_nesting/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + timestamp_key = "properties_time" + timestamp_format = "unix_ms" + tags = ["properties_place"] diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/expected.out b/plugins/parsers/json_v2/testdata/complex_nesting/expected.out new file mode 100644 index 00000000..265549c5 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/expected.out @@ -0,0 +1,3 @@ +file,properties_place=Antelope\ Valley\,\ CA 
geometry_coordinates=-119.4998333,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=38.5075,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" +file,properties_place=Antelope\ Valley\,\ CA geometry_coordinates=7.45,geometry_type="Point",id="nc73584926",properties_mag=6,properties_updated=1.626277167263e+12,type="Feature" diff --git a/plugins/parsers/json_v2/testdata/complex_nesting/input.json b/plugins/parsers/json_v2/testdata/complex_nesting/input.json new file mode 100644 index 00000000..69bff40a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/complex_nesting/input.json @@ -0,0 +1,31 @@ +{ + "type": "FeatureCollection", + "metadata": { + "generated": 1626285886000, + "url": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/significant_week.geojson", + "title": "USGS Significant Earthquakes, Past Week", + "status": 200, + "api": "1.10.3", + "count": 1 + }, + "features": [ + { + "type": "Feature", + "properties": { + "mag": 6, + "place": "Antelope Valley, CA", + "time": 1625784588110, + "updated": 1626277167263 + }, + "geometry": { + "type": "Point", + "coordinates": [ + -119.4998333, + 38.5075, + 7.45 + ] + }, + "id": "nc73584926" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/fields_and_tags/circonus-unified-agent.conf new file mode 100644 index 00000000..cb761f39 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/circonus-unified-agent.conf @@ -0,0 +1,15 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/fields_and_tags/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.tag]] + path = "fields.status" + [[inputs.file.json_v2.field]] + path = "fields.json.#.duration" + rename = "json_duration" + [[inputs.file.json_v2.field]] + path = "fields.duration" + type = "int" diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out b/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out new file mode 100644 index 00000000..2b7f6c16 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/expected.out @@ -0,0 +1,2 @@ +file,status=200 duration=2i,json_duration=100 +file,status=200 duration=2i,json_duration=60 diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags/input.json b/plugins/parsers/json_v2/testdata/fields_and_tags/input.json new file mode 100644 index 00000000..e8be29f9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags/input.json @@ -0,0 +1,46 @@ +{ + "message": "abc", + "fields": { + "status": 200, + "key": 1, + "json": [ + { + "duration": 100, + "code": 1, + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7, + "nest": { + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 4, + "fields": 7 + } + }, + { + "duration": 60, + "code": 1, + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7, + "nest": { + "label": 2, + "line": 3, + "many": 4, + "more": 5, + "numerical": 6, + "fields": 7 + } + } + ], + "duration": 2 + } +} diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/circonus-unified-agent.conf new file mode 100644 index 00000000..1df9d677 --- /dev/null +++ 
b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/circonus-unified-agent.conf @@ -0,0 +1,11 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/fields_and_tags_complex/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "bart_json_v2" + [[inputs.file.json_v2.tag]] + path = "root.station.#.name" + [[inputs.file.json_v2.field]] + path = "root.station.#.etd.#.estimate.#.minutes" + type = "int" diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out new file mode 100644 index 00000000..02edaba4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out @@ -0,0 +1,5 @@ +bart_json_v2,name=Powell\ St. minutes=9i +bart_json_v2,name=Powell\ St. minutes=40i +bart_json_v2,name=Powell\ St. minutes=70i +bart_json_v2,name=Powell\ St. minutes=12i +bart_json_v2,name=Powell\ St. minutes=42i diff --git a/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json new file mode 100644 index 00000000..15a0dab9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&json=y" + }, + "date": "06/03/2021", + "time": "09:46:01 AM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "9", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "40", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "70", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Berryessa", + "abbreviation": "BERY", + "limited": "0", + "estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "42", + "platform": "2", + "direction": "North", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/large_numbers/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/large_numbers/circonus-unified-agent.conf new file mode 100644 index 00000000..a297b133 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/circonus-unified-agent.conf @@ -0,0 +1,23 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/large_numbers/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "int" + [inputs.file.json_v2.object.fields] + large = "int" + larger = "int" + largest = "int" + [[inputs.file.json_v2.object]] + path = "uint" + [inputs.file.json_v2.object.fields] + large = "uint" + larger = "uint" + largest = "uint" + [[inputs.file.json_v2.object]] + path = "float" + [inputs.file.json_v2.object.fields] + large = "float" + larger = "float" + 
largest = "float" diff --git a/plugins/parsers/json_v2/testdata/large_numbers/expected.out b/plugins/parsers/json_v2/testdata/large_numbers/expected.out new file mode 100644 index 00000000..1edb0565 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/expected.out @@ -0,0 +1,3 @@ +file large=4294967296i,larger=9007199254740991i,largest=9223372036854775807i +file large=9007199254740991u,larger=9223372036854775807u,largest=18446744073709551615u +file large=4294967296,larger=4.294967296663e+09,largest=9007199254740991 diff --git a/plugins/parsers/json_v2/testdata/large_numbers/input.json b/plugins/parsers/json_v2/testdata/large_numbers/input.json new file mode 100644 index 00000000..a800d0cd --- /dev/null +++ b/plugins/parsers/json_v2/testdata/large_numbers/input.json @@ -0,0 +1,17 @@ +{ + "int": { + "large": 4294967296, + "larger": 9007199254740991, + "largest": 9223372036854775807 + }, + "uint": { + "large": 9007199254740991, + "larger": 9223372036854775807, + "largest": 18446744073709551615 + }, + "float": { + "large": 4294967296, + "larger": 4.294967296663e+09, + "largest": 9007199254740991 + } +} diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/measurement_name_int/circonus-unified-agent.conf new file mode 100644 index 00000000..bc8289f2 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/circonus-unified-agent.conf @@ -0,0 +1,10 @@ +# Example taken from: https://github.com/influxdata/feature-requests/issues/160 + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/measurement_name_int/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name_path = "class_id" + [[inputs.file.json_v2.field]] + path = "label" diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out b/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out new file mode 100644 index 00000000..4afd678a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/expected.out @@ -0,0 +1 @@ +32 label="Basic" diff --git a/plugins/parsers/json_v2/testdata/measurement_name_int/input.json b/plugins/parsers/json_v2/testdata/measurement_name_int/input.json new file mode 100644 index 00000000..34dccc62 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/measurement_name_int/input.json @@ -0,0 +1,19 @@ +{ + "value_id": "52-32-1-0", + "node_id": 52, + "class_id": 32, + "type": "byte", + "genre": "basic", + "instance": 1, + "index": 0, + "label": "Basic", + "units": "", + "help": "Basic status of the node", + "read_only": false, + "write_only": false, + "min": 0, + "max": 255, + "is_polled": false, + "value": 0, + "lastUpdate": 1584636017962 +} diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/mix_field_and_object/circonus-unified-agent.conf new file mode 100644 index 00000000..ba9f90bb --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/circonus-unified-agent.conf @@ -0,0 +1,16 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/mix_field_and_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "openweather" + [[inputs.file.json_v2.field]] + path = "weather.#.main" + rename = "summary" + [[inputs.file.json_v2.field]] + path = "weather.#.description" + [[inputs.file.json_v2.object]] + path = "@this" + included_keys = ["coord_lat", "coord_lon", "main_temp", 
"wind_speed"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result + tags = ["id", "name"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out new file mode 100644 index 00000000..e7f0e222 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out @@ -0,0 +1 @@ +openweather,id=2.643743e+06,name=London coord_lat=51.5085,coord_lon=-0.1257,description="few clouds",main_temp=12.54,summary="Clouds",wind_speed=2.11 1628186541000000000 diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json new file mode 100644 index 00000000..402113af --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json @@ -0,0 +1,44 @@ +{ + "coord": { + "lon": -0.1257, + "lat": 51.5085 + }, + "weather": [ + { + "id": 801, + "main": "Clouds", + "description": "few clouds", + "icon": "02n" + } + ], + "base": "stations", + "main": { + "temp": 12.54, + "feels_like": 11.86, + "temp_min": 10.49, + "temp_max": 14.27, + "pressure": 1024, + "humidity": 77 + }, + "visibility": 10000, + "wind": { + "speed": 2.11, + "deg": 254, + "gust": 4.63 + }, + "clouds": { + "all": 21 + }, + "dt": 1633545358, + "sys": { + "type": 2, + "id": 2019646, + "country": "GB", + "sunrise": 1633500560, + "sunset": 1633541256 + }, + "timezone": 3600, + "id": 2643743, + "name": "London", + "cod": 200 +} diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/circonus-unified-agent.conf new file mode 100644 index 00000000..527f0dad --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/circonus-unified-agent.conf @@ -0,0 +1,12 @@ +# Example getting nested fields with duplicate names + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/multiple_arrays_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "book" + tags = ["title"] + disable_prepend_keys = true diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out new file mode 100644 index 00000000..8365b551 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -0,0 +1,8 @@ +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Bilbo",random=1,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",random=1,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Frodo",random=1,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",random=1,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party",name="Bilbo",random=2,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",random=2,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected 
Party",name="Frodo",random=2,species="hobbit" +file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",random=2,species="hobbit" diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json new file mode 100644 index 00000000..271638a4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json @@ -0,0 +1,24 @@ +{ + "book": { + "title": "The Lord Of The Rings", + "chapters": [ + "A Long-expected Party", + "The Shadow of the Past" + ], + "author": "Tolkien", + "characters": [ + { + "name": "Bilbo", + "species": "hobbit" + }, + { + "name": "Frodo", + "species": "hobbit" + } + ], + "random": [ + 1, + 2 + ] + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/multiple_json_input/circonus-unified-agent.conf new file mode 100644 index 00000000..6e02e38b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/circonus-unified-agent.conf @@ -0,0 +1,19 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/multiple_json_input/input_1.json", "./testdata/multiple_json_input/input_2.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.tag]] + path="#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.field]] + path = "#.etd.0.estimate.0.minutes" + rename = "minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out new file mode 100644 index 00000000..f3fa9f0d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out @@ -0,0 +1,2 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North minutes=2i +file,from_station=POWL,to_station=DALY,etd_estimate_direction=South minutes=6i diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json new file mode 100644 index 00000000..f60cd59f --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "07/02/2021", + "time": "06:05:47 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "2", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "16", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "31", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "22", + "platform": 
"2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "52", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json new file mode 100644 index 00000000..e75e84a0 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json @@ -0,0 +1,134 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&dir=s&json=y" + }, + "date": "07/02/2021", + "time": "06:06:01 PM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Daly City", + "abbreviation": "DALY", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "1", + "direction": "South", + "length": "9", + "color": "BLUE", + "hexcolor": "#0099cc", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "SF Airport", + "abbreviation": "SFIA", + "limited": "0", + "estimate": [ + { + "minutes": "7", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "37", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "67", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/multiple_timestamps/circonus-unified-agent.conf new file mode 100644 index 00000000..94fd8dbe --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/circonus-unified-agent.conf @@ -0,0 +1,11 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/multiple_timestamps/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "events" + timestamp_key = "time" + timestamp_format = "unix_ms" diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out b/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out 
new file mode 100644 index 00000000..0cc5bb93 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out @@ -0,0 +1,2 @@ +file name="fire" 1555745371450794118 +file name="flood" 1555745371450794118 diff --git a/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json b/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json new file mode 100644 index 00000000..7931dca6 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_timestamps/input.json @@ -0,0 +1,12 @@ +{ + "events": [ + { + "name": "fire", + "time": "1555745371410" + }, + { + "name": "flood", + "time": "1555745371410" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/circonus-unified-agent.conf new file mode 100644 index 00000000..8d4d45a9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/circonus-unified-agent.conf @@ -0,0 +1,17 @@ + + +# Parse String types from JSON +[[inputs.file]] + instance_id = "circonus_test" +files = ["./testdata/nested_and_nonnested_tags/input.json"] +data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + disable_prepend_keys = true + path = "@this" + included_keys = [ + "systemVoltage", + "systemCurrent", + "tables", + ] + tags = ["hostname", "tables_outputname"] diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out new file mode 100644 index 00000000..d48b7660 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out @@ -0,0 +1,12 @@ +file,hostname=testhost1,outputname=1A-CC01-PC01 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=2A-CC01-KA01 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=3A-CC01-CC02 systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=4A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=5A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost1,outputname=6A-CC01-88-INV01-A systemVoltage=-54.1,systemCurrent=-3.8 +file,hostname=testhost2,outputname=1A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=2A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=3A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=4A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=5A systemVoltage=27.5,systemCurrent=9.5 +file,hostname=testhost2,outputname=6A systemVoltage=27.5,systemCurrent=9.5 diff --git a/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json new file mode 100644 index 00000000..60d7f248 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json @@ -0,0 +1,174 @@ +[ + { + "hostname": "testhost1", + "systemVoltage": -54.1, + "systemCurrent": -3.8, + "tables": [ + { + "outputnumber": 0.0, + "outputname": "1A-CC01-PC01", + "outputcurrent": -2.7, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 1.0, + "outputname": "2A-CC01-KA01", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 2.0, + "outputname": "3A-CC01-CC02", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 3.0, + "outputname": "4A", + 
"outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 4.0, + "outputname": "5A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 5.0, + "outputname": "6A-CC01-88-INV01-A", + "outputcurrent": -1.1, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "busnumber": 0.0, + "busname": "A--48A", + "busvoltage": -54.1, + "buscurrent": -3.8 + }, + { + "busnumber": 1.0, + "busname": "B--48B", + "busvoltage": -53.9, + "buscurrent": -4.2 + }, + { + "alarmnumber": 0.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 1.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 2.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 3.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 4.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + } + ] + }, + { + "hostname": "testhost2", + "systemVoltage": 27.5, + "systemCurrent": 9.5, + "tables": [ + { + "outputnumber": 0.0, + "outputname": "1A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 1.0, + "outputname": "2A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 2.0, + "outputname": "3A", + "outputcurrent": 0.0, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 3.0, + "outputname": "4A", + "outputcurrent": 0.6, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 4.0, + "outputname": "5A", + "outputcurrent": 6.5, + "outputfusestatus": 1.0, + "outputenable": 1.0 + }, + { + "outputnumber": 5.0, + "outputname": "6A", + "outputcurrent": 0.0, + "outputfusestatus": 2.0, + "outputenable": 1.0 + }, + { + "busnumber": 0.0, + "busname": "A-24V", + "busvoltage": 27.6, + "buscurrent": 0.6 + }, + { + "busnumber": 1.0, + "busname": "B-12V", + "busvoltage": 13.8, + "buscurrent": 0.0 + }, + { + "alarmnumber": 0.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 1.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 2.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 3.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + }, + { + "alarmnumber": 4.0, + "alarmname": "\u0000", + "alarmstatus": 1.0 + } + ] + } +] diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/nested_array_of_objects/circonus-unified-agent.conf new file mode 100644 index 00000000..17136069 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/circonus-unified-agent.conf @@ -0,0 +1,16 @@ +# Example taken from: https://github.com/influxdata/feature-requests/issues/160 + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/nested_array_of_objects/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "new_metric" + [[inputs.file.json_v2.object]] + path = "@this" + disable_prepend_keys = true + excluded_keys = ["tags", "timestamp"] + tags = ["name"] + [inputs.file.json_v2.object.fields] + data = "int" + diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out b/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out new file mode 100644 index 00000000..972ea5ea --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out @@ -0,0 +1,2 @@ +new_metric,name=partition 
LogEndOffset=339238i,LogStartOffset=339238i,NumLogSegments=1i,Size=0i,UnderReplicatedPartitions=0i 1610056029037925000 +new_metric,name=partition LogEndOffset=33914i,LogStartOffset=33238i,NumLogSegments=1i,Size=2i,UnderReplicatedPartitions=5i 1610056029037956000 diff --git a/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json b/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json new file mode 100644 index 00000000..86ded773 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json @@ -0,0 +1,36 @@ +[ + { + "data": { + "LogEndOffset": 339238, + "LogStartOffset": 339238, + "NumLogSegments": 1, + "Size": 0, + "UnderReplicatedPartitions": 0 + }, + "name": "partition", + "tags": { + "host": "CUD1-001559", + "jolokia_agent_url": "http://localhost:7777/jolokia", + "partition": "1", + "topic": "qa-kafka-connect-logs" + }, + "timestamp": 1591124461 + }, + { + "data": { + "LogEndOffset": 33914, + "LogStartOffset": 33238, + "NumLogSegments": 1, + "Size": 2, + "UnderReplicatedPartitions": 5 + }, + "name": "partition", + "tags": { + "host": "CUD1-001559", + "jolokia_agent_url": "http://localhost:7777/jolokia", + "partition": "1", + "topic": "qa-kafka-connect-logs" + }, + "timestamp": 1591124461 + } +] diff --git a/plugins/parsers/json_v2/testdata/nested_tags/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/nested_tags/circonus-unified-agent.conf new file mode 100644 index 00000000..649c0f0c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/circonus-unified-agent.conf @@ -0,0 +1,13 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/nested_tags/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "device0" + tags = ["Firmware", "Model", "Serial"] + [[inputs.file.json_v2.object]] + path = "device1" + tags = ["Firmware", "Model", "Serial"] diff --git a/plugins/parsers/json_v2/testdata/nested_tags/expected.out b/plugins/parsers/json_v2/testdata/nested_tags/expected.out new file mode 100644 index 00000000..7b31560a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/expected.out @@ -0,0 +1,2 @@ +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGTUT Count=0,Errors=0 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHLPW9T Errors=0,Count=0 diff --git a/plugins/parsers/json_v2/testdata/nested_tags/input.json b/plugins/parsers/json_v2/testdata/nested_tags/input.json new file mode 100644 index 00000000..c3226f34 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags/input.json @@ -0,0 +1,16 @@ +{ + "device0": { + "Count": 0, + "Errors": 0, + "Serial": "9JHNGTUT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + }, + "device1": { + "Count": 0, + "Errors": 0, + "Serial": "9JHLPW9T", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } +} diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/nested_tags_complex/circonus-unified-agent.conf new file mode 100644 index 00000000..579143f3 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/circonus-unified-agent.conf @@ -0,0 +1,15 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/nested_tags_complex/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "Group A" + disable_prepend_keys = true + tags = ["Sub-group_1_Firmware", "Sub-group_1_Model", 
"Sub-group_1_Serial"] + [[inputs.file.json_v2.object]] + path = "Group B" + disable_prepend_keys = true + tags = ["Sub-group_1_Firmware", "Sub-group_1_Model", "Sub-group_1_Serial"] diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out b/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out new file mode 100644 index 00000000..92757bad --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out @@ -0,0 +1,3 @@ +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGTUT Count=0,Errors=0 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHNGHJBT Errors=0,Count=2 +file,Firmware=LDGSW07G,Model=WDC\ WUH721414ALE604,Serial=9JHLPW9T Errors=0,Count=0 diff --git a/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json b/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json new file mode 100644 index 00000000..b373d90a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/nested_tags_complex/input.json @@ -0,0 +1,35 @@ +{ + "Group A": [ + { + "Sub-group 1": [ + { + "Count": 0, + "Errors": 0, + "Serial": "9JHNGTUT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + }, + { + "Count": 2, + "Errors": 0, + "Serial": "9JHNGHJBT", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } + ] + } + ], + "Group B": [ + { + "Sub-group 1": [ + { + "Count": 0, + "Errors": 0, + "Serial": "9JHLPW9T", + "Model": "WDC WUH721414ALE604", + "Firmware": "LDGSW07G" + } + ] + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/null/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/null/circonus-unified-agent.conf new file mode 100644 index 00000000..fbf824d3 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/circonus-unified-agent.conf @@ -0,0 +1,9 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/null/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "features" + tags = ["type", "id"] + disable_prepend_keys = true diff --git a/plugins/parsers/json_v2/testdata/null/expected.out b/plugins/parsers/json_v2/testdata/null/expected.out new file mode 100644 index 00000000..4f99713c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/expected.out @@ -0,0 +1 @@ +file,id=ak0217l8ue0x,type=Feature detail="https://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/ak0217l8ue0x.geojson",mag=1.5,place="63 km N of Petersville, Alaska",status="automatic",time=1623708726566,updated=1623709998223,url="https://earthquake.usgs.gov/earthquakes/eventpage/ak0217l8ue0x" diff --git a/plugins/parsers/json_v2/testdata/null/input.json b/plugins/parsers/json_v2/testdata/null/input.json new file mode 100644 index 00000000..757f5483 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/null/input.json @@ -0,0 +1,40 @@ +{ + "type": "FeatureCollection", + "metadata": { + "generated": 1623710450000, + "url": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson", + "title": "USGS All Earthquakes, Past Hour", + "status": 200, + "api": "1.10.3", + "count": 10 + }, + "features": [ + { + "type": "Feature", + "properties": { + "mag": 1.5, + "place": "63 km N of Petersville, Alaska", + "time": 1623708726566, + "updated": 1623709998223, + "tz": null, + "url": "https://earthquake.usgs.gov/earthquakes/eventpage/ak0217l8ue0x", + "detail": "https://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/ak0217l8ue0x.geojson", + "felt": null, + "cdi": null, + "mmi": null, + "alert": null, + "status": "automatic" + }, + "id": 
"ak0217l8ue0x" + } + ], + "bbox": [ + -157.5749, + 32.9001667, + 0.25, + -115.6211667, + 66.331, + 132.5 + ] + } + \ No newline at end of file diff --git a/plugins/parsers/json_v2/testdata/object/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/object/circonus-unified-agent.conf new file mode 100644 index 00000000..6c59ae3a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/circonus-unified-agent.conf @@ -0,0 +1,13 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "bart_json_v2" + [[inputs.file.json_v2.object]] + path = "root.station" + disable_prepend_keys = true + included_keys = ["etd_estimate_minutes"] + tags = ["name", "etd_destination"] + [inputs.file.json_v2.object.fields] + etd_estimate_minutes = "int" diff --git a/plugins/parsers/json_v2/testdata/object/expected.out b/plugins/parsers/json_v2/testdata/object/expected.out new file mode 100644 index 00000000..8832d32b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/expected.out @@ -0,0 +1,5 @@ +bart_json_v2,destination=Antioch,name=Colma minutes=13i +bart_json_v2,destination=Antioch,name=Colma minutes=43i +bart_json_v2,destination=Millbrae,name=Colma minutes=19i +bart_json_v2,destination=Millbrae,name=Colma minutes=49i +bart_json_v2,destination=Millbrae,name=Colma minutes=79i diff --git a/plugins/parsers/json_v2/testdata/object/input.json b/plugins/parsers/json_v2/testdata/object/input.json new file mode 100644 index 00000000..cc8b0851 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object/input.json @@ -0,0 +1,87 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&json=y" + }, + "date": "06/03/2021", + "time": "12:54:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "13", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "43", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/object_timestamp/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/object_timestamp/circonus-unified-agent.conf new file mode 100644 index 00000000..77191048 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object_timestamp/circonus-unified-agent.conf @@ -0,0 +1,13 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/object_timestamp/input.json"] + data_format = "json_v2" + + [[inputs.file.json_v2]] + measurement_name = "metric" + 
+ [[inputs.file.json_v2.object]] + path = "events" + tags = ["id"] + timestamp_key = "time" + timestamp_format = "2006-01-02T15:04:05Z" diff --git a/plugins/parsers/json_v2/testdata/object_timestamp/expected.out b/plugins/parsers/json_v2/testdata/object_timestamp/expected.out new file mode 100644 index 00000000..eb9fb233 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object_timestamp/expected.out @@ -0,0 +1,3 @@ +metric,id=100 value=100.123 1609459199000000000 +metric,id=101 value=200.001 1276605000000000000 +metric,id=102 value=999.999 946684800000000000 diff --git a/plugins/parsers/json_v2/testdata/object_timestamp/input.json b/plugins/parsers/json_v2/testdata/object_timestamp/input.json new file mode 100644 index 00000000..9f621a19 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/object_timestamp/input.json @@ -0,0 +1,19 @@ +{ + "events": [ + { + "id": 100, + "value": 100.123, + "time": "2020-12-31T23:59:59Z" + }, + { + "id": 101, + "value": 200.001, + "time": "2010-06-15T12:30:00Z" + }, + { + "id": 102, + "value": 999.999, + "time": "2000-01-01T00:00:00Z" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/optional/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/optional/circonus-unified-agent.conf new file mode 100644 index 00000000..e223ded6 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional/circonus-unified-agent.conf @@ -0,0 +1,16 @@ + + +# Parse String types from JSON +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/optional/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + path = "wrong" + optional = true + [[inputs.file.json_v2.object]] + path = "wrong" + optional = true + + diff --git a/plugins/parsers/json_v2/testdata/optional/expected.out b/plugins/parsers/json_v2/testdata/optional/expected.out new file mode 100644 index 00000000..e69de29b diff --git a/plugins/parsers/json_v2/testdata/optional/input.json b/plugins/parsers/json_v2/testdata/optional/input.json new file mode 100644 index 00000000..71374789 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional/input.json @@ -0,0 +1,3 @@ +{ + "test": "test" +} diff --git a/plugins/parsers/json_v2/testdata/optional_objects/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/optional_objects/circonus-unified-agent.conf new file mode 100644 index 00000000..7984c63e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional_objects/circonus-unified-agent.conf @@ -0,0 +1,22 @@ +# Parse different objects +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/optional_objects/input_1.json", "./testdata/optional_objects/input_2.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "Time" + timestamp_format = "2006-01-02T15:04:05" + [[inputs.file.json_v2.object]] + path = "BME280" + included_keys = ["Humidity", "Pressure", "Temperature"] + optional = true + [[inputs.file.json_v2.object]] + path = "TSL2561" + included_keys = ["Illuminance"] + optional = true + [[inputs.file.json_v2.object]] + path = "MT681" + optional = true + tags = ["Meter_number"] + + diff --git a/plugins/parsers/json_v2/testdata/optional_objects/expected.out b/plugins/parsers/json_v2/testdata/optional_objects/expected.out new file mode 100644 index 00000000..a6908cf5 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional_objects/expected.out @@ -0,0 +1,3 @@ +file,Meter_number=12344 Power_curr=168,Total_in=2769.492 1646326975000000000 +file Humidity=63.3,Pressure=1020.6,Temperature=22.5 
1646335555000000000 +file Illuminance=2.022 1646335555000000000 diff --git a/plugins/parsers/json_v2/testdata/optional_objects/input_1.json b/plugins/parsers/json_v2/testdata/optional_objects/input_1.json new file mode 100644 index 00000000..11d0a20a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional_objects/input_1.json @@ -0,0 +1 @@ +{"Time":"2022-03-03T17:02:55","MT681":{"Power_curr":168,"Total_in":2769.492,"Meter_number":"12344"}} diff --git a/plugins/parsers/json_v2/testdata/optional_objects/input_2.json b/plugins/parsers/json_v2/testdata/optional_objects/input_2.json new file mode 100644 index 00000000..89ffc6ee --- /dev/null +++ b/plugins/parsers/json_v2/testdata/optional_objects/input_2.json @@ -0,0 +1 @@ +{"Time":"2022-03-03T19:25:55","BME280":{"Temperature":22.5,"Humidity":63.3,"DewPoint":15.1,"Pressure":1020.6,"SeaPressure":1024.1},"TSL2561":{"Illuminance":2.022,"IR":16,"Broadband":78},"PressureUnit":"hPa","TempUnit":"C"} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/circonus-unified-agent.conf new file mode 100644 index 00000000..e2489f3a --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/circonus-unified-agent.conf @@ -0,0 +1,18 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/subfieldtag_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.field]] + path = "#.etd.0.estimate.0.minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out new file mode 100644 index 00000000..a7db8386 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out @@ -0,0 +1 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North etd_estimate_minutes=6i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json new file mode 100644 index 00000000..45d0d551 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json @@ -0,0 +1,97 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "06/25/2021", + "time": "05:01:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "51", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + 
"estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "41", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/circonus-unified-agent.conf new file mode 100644 index 00000000..92cce3e4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/circonus-unified-agent.conf @@ -0,0 +1,17 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/subfieldtag_in_object_2/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "@this" + [[inputs.file.json_v2.object.tag]] + path = "data" + [[inputs.file.json_v2.object.field]] + path = "cnt" + type = "int" + [[inputs.file.json_v2.object.field]] + path = "format" + type = "int" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out new file mode 100644 index 00000000..89748967 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out @@ -0,0 +1,4 @@ +file,data=3 cnt=23i,format=0i +file,data=7 cnt=23i,format=0i +file,data=10 cnt=23i,format=0i +file,data=23 cnt=23i,format=0i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json new file mode 100644 index 00000000..62b768ea --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json @@ -0,0 +1,10 @@ +{ + "cnt": 23, + "data": [ + 3, + 7, + 10, + 23 + ], + "format": 0 +} diff --git a/plugins/parsers/json_v2/testdata/timestamp/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/timestamp/circonus-unified-agent.conf new file mode 100644 index 00000000..dcd8e3c4 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/circonus-unified-agent.conf @@ -0,0 +1,12 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/timestamp/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "time" + timestamp_format = "unix_ms" + [[inputs.file.json_v2.object]] + path = "measurements" + tags = ["name", "units"] diff --git a/plugins/parsers/json_v2/testdata/timestamp/expected.out b/plugins/parsers/json_v2/testdata/timestamp/expected.out new file mode 100644 index 00000000..e2e74151 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/expected.out @@ -0,0 +1,4 @@ +file,name=temperature,units=℃ value=23.4 1555745371450794118 +file,name=moisture,units=% value=5 1555745371450794118 +file,name=light,units=lux value=10118 1555745371450794118 +file,name=fertility,units=us/cm value=0 1555745371450794118 diff --git a/plugins/parsers/json_v2/testdata/timestamp/input.json b/plugins/parsers/json_v2/testdata/timestamp/input.json new file mode 100644 index 00000000..356d986e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp/input.json @@ -0,0 +1,25 @@ +{ + "time": 1555745371410, + "measurements": [ + { + "name": 
"temperature", + "value": 23.4, + "units": "℃" + }, + { + "name": "moisture", + "value": 5, + "units": "%" + }, + { + "name": "light", + "value": 10118, + "units": "lux" + }, + { + "name": "fertility", + "value": 0, + "units": "us/cm" + } + ] +} diff --git a/plugins/parsers/json_v2/testdata/timestamp_ns/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/timestamp_ns/circonus-unified-agent.conf new file mode 100644 index 00000000..6b27fa7b --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_ns/circonus-unified-agent.conf @@ -0,0 +1,12 @@ + + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/timestamp_ns/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "test" + timestamp_path = "timestamp" + timestamp_format = "unix_ns" + [[inputs.file.json_v2.object]] + path = "test" diff --git a/plugins/parsers/json_v2/testdata/timestamp_ns/expected.out b/plugins/parsers/json_v2/testdata/timestamp_ns/expected.out new file mode 100644 index 00000000..ee983ede --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_ns/expected.out @@ -0,0 +1,2 @@ +test value=0 1631202459121654321 +test value=1 1631202459121654321 diff --git a/plugins/parsers/json_v2/testdata/timestamp_ns/input.json b/plugins/parsers/json_v2/testdata/timestamp_ns/input.json new file mode 100644 index 00000000..bb911fee --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_ns/input.json @@ -0,0 +1,7 @@ +{ + "test": [ + { "value": 0 }, + { "value": 1 } + ], + "timestamp": 1631202459121654321 +} diff --git a/plugins/parsers/json_v2/testdata/timestamp_rfc3339/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/circonus-unified-agent.conf new file mode 100644 index 00000000..e454e6df --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/circonus-unified-agent.conf @@ -0,0 +1,9 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/timestamp_rfc3339/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "when" + timestamp_format = "rfc3339" + [[inputs.file.json_v2.field]] + path = "f" diff --git a/plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out new file mode 100644 index 00000000..8d1e1285 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out @@ -0,0 +1 @@ +file f="value" 1644434944000000000 diff --git a/plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json new file mode 100644 index 00000000..9e02e014 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json @@ -0,0 +1,4 @@ +{ + "when": "2022-02-09T19:29:04Z", + "f": "value" +} diff --git a/plugins/parsers/json_v2/testdata/types/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/types/circonus-unified-agent.conf new file mode 100644 index 00000000..d9adddac --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/circonus-unified-agent.conf @@ -0,0 +1,109 @@ + + +# Parse String types from JSON +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitstringtypeName" + path = "explicitstringtype" + type = "string" + [[inputs.file.json_v2.field]] + rename = "defaultstringtypeName" + path = "defaultstringtype" + [[inputs.file.json_v2.field]] + rename = 
"convertbooltostringName" + path = "convertbooltostring" + type = "string" + [[inputs.file.json_v2.field]] + rename = "convertinttostringName" + path = "convertinttostring" + type = "string" + [[inputs.file.json_v2.field]] + rename = "convertfloattostringName" + path = "convertfloattostring" + type = "string" + +# Parse int typess from JSON +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitinttypeName" + path = "explicitinttype" + type = "int" + [[inputs.file.json_v2.field]] + rename = "uinttype" + path = "explicitinttype" + type = "uint" + [[inputs.file.json_v2.field]] + rename = "defaultinttypeName" + path = "defaultinttype" + [[inputs.file.json_v2.field]] + rename = "convertfloatointName" + path = "convertfloatoint" + type = "int" + [[inputs.file.json_v2.field]] + rename = "convertstringtointName" + path = "convertstringtoint" + type = "int" + [[inputs.file.json_v2.field]] + rename = "convertbooltointName" + path = "convertbooltoint" + type = "int" + +# Parse float types from JSON +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitfloattypeName" + path = "explicitfloattype" + type = "float" + [[inputs.file.json_v2.field]] + rename = "defaultfloattypeName" + path = "defaultfloattype" + [[inputs.file.json_v2.field]] + rename = "convertintotfloatName" + path = "convertintotfloat" + type = "float" + [[inputs.file.json_v2.field]] + rename = "convertstringtofloatName" + path = "convertstringtofloat" + type = "float" + +# Parse bool types from JSON +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/types/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + rename = "explicitbooltypeName" + path = "explicitbooltype" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "defaultbooltypeName" + path = "defaultbooltype" + [[inputs.file.json_v2.field]] + rename = "convertinttoboolName" + path = "convertinttobool" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertstringtoboolName" + path = "convertstringtobool" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertintstringtoboolTrueName" + path = "convertintstringtoboolTrue" + type = "bool" + [[inputs.file.json_v2.field]] + rename = "convertintstringtoboolFalseName" + path = "convertintstringtoboolFalse" + type = "bool" diff --git a/plugins/parsers/json_v2/testdata/types/expected.out b/plugins/parsers/json_v2/testdata/types/expected.out new file mode 100644 index 00000000..87cee38e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/expected.out @@ -0,0 +1,4 @@ +file explicitstringtypeName="Bilbo",defaultstringtypeName="Baggins",convertbooltostringName="true",convertinttostringName="1",convertfloattostringName="1.1" +file defaultinttypeName=2,convertfloatointName=3i,convertstringtointName=4i,convertbooltointName=0i,explicitinttypeName=1i,uinttype=1u +file convertstringtofloatName=4.1,explicitfloattypeName=1.1,defaultfloattypeName=2.1,convertintotfloatName=3 +file explicitbooltypeName=true,defaultbooltypeName=false,convertinttoboolName=true,convertstringtoboolName=false,convertintstringtoboolTrueName=true,convertintstringtoboolFalseName=false diff --git a/plugins/parsers/json_v2/testdata/types/input.json b/plugins/parsers/json_v2/testdata/types/input.json new 
file mode 100644 index 00000000..bb85fc9e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/types/input.json @@ -0,0 +1,22 @@ +{ + "explicitstringtype": "Bilbo", + "defaultstringtype": "Baggins", + "convertbooltostring": true, + "convertinttostring": 1, + "convertfloattostring": 1.1, + "explicitinttype": 1, + "defaultinttype": 2, + "convertfloatoint": 3.1, + "convertstringtoint": "4", + "convertbooltoint": false, + "explicitfloattype": 1.1, + "defaultfloattype": 2.1, + "convertintotfloat": 3, + "convertstringtofloat": "4.1", + "explicitbooltype": true, + "defaultbooltype": false, + "convertinttobool": 1, + "convertstringtobool": "false", + "convertintstringtoboolTrue": "1", + "convertintstringtoboolFalse": "0" +} diff --git a/plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf new file mode 100644 index 00000000..3553e815 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf @@ -0,0 +1,50 @@ +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "wrong" + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "correct" + [[inputs.file.json_v2.object.tag]] + path = "wrong" + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "correct" + [[inputs.file.json_v2.object.field]] + path = "wrong" + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + timestamp_path = "wrong" + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.tag]] + path = "wrong" + +[[inputs.file]] + instance_id = "circonus_test" + files = ["./testdata/wrong_path/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.field]] + path = "wrong" \ No newline at end of file diff --git a/plugins/parsers/json_v2/testdata/wrong_path/expected.out b/plugins/parsers/json_v2/testdata/wrong_path/expected.out new file mode 100644 index 00000000..e69de29b diff --git a/plugins/parsers/json_v2/testdata/wrong_path/input.json b/plugins/parsers/json_v2/testdata/wrong_path/input.json new file mode 100644 index 00000000..74b6ec88 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/wrong_path/input.json @@ -0,0 +1,3 @@ +{ + "correct": "test" +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 0182466e..f1a5d11d 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -12,6 +12,7 @@ import ( "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/grok" "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/influx" "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/json" + "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/json_v2" "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/logfmt" "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/nagios" "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/value" @@ -148,6 +149,9 @@ type Config 
struct { // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + + // JSONPath configuration + JSONV2Config []JSONV2Config `toml:"json_v2"` } // NewParser returns a Parser interface based on the given config. @@ -232,6 +236,8 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags, config.FormUrlencodedTagKeys, ) + case "json_v2": + parser, err = NewJSONPathParser(config.JSONV2Config) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -339,3 +345,27 @@ func NewFormUrlencodedParser( TagKeys: tagKeys, }, nil } + +type JSONV2Config struct { + json_v2.Config +} + +func NewJSONPathParser(jsonv2config []JSONV2Config) (Parser, error) { + configs := make([]json_v2.Config, len(jsonv2config)) + for i, cfg := range jsonv2config { + configs[i].MeasurementName = cfg.MeasurementName + configs[i].MeasurementNamePath = cfg.MeasurementNamePath + + configs[i].TimestampPath = cfg.TimestampPath + configs[i].TimestampFormat = cfg.TimestampFormat + configs[i].TimestampTimezone = cfg.TimestampTimezone + + configs[i].Fields = cfg.Fields + configs[i].Tags = cfg.Tags + + configs[i].JSONObjects = cfg.JSONObjects + } + return &json_v2.Parser{ + Configs: configs, + }, nil +} From c230c21b1331cd028f9e7a4e1d15e8f12952fcc7 Mon Sep 17 00:00:00 2001 From: Will Krause Date: Fri, 10 Jun 2022 10:59:58 -0400 Subject: [PATCH 2/6] fix golangci-lint errors --- config/config.go | 2 +- plugins/parsers/json_v2/parser.go | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/config/config.go b/config/config.go index 33019890..39b08cdd 100644 --- a/config/config.go +++ b/config/config.go @@ -1417,7 +1417,7 @@ func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) - //for JSONPath parser + // for JSONPath parser if node, ok := tbl.Fields["json_v2"]; ok { if metricConfigs, ok := node.([]*ast.Table); ok { pc.JSONV2Config = make([]parsers.JSONV2Config, len(metricConfigs)) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index d5e2e4cf..5388dd04 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -175,7 +175,7 @@ func (p *Parser) processMetric(input []byte, data []DataSet, tag bool, timestamp } p.iterateObjects = false - var metrics [][]cua.Metric + metrics := [][]cua.Metric{} for _, c := range data { if c.Path == "" { @@ -571,7 +571,8 @@ func (p *Parser) isIncluded(key string, val gjson.Result) bool { return true } // automatically adds tags to included_keys so it does NOT have to be repeated in the config - allKeys := append(p.objectConfig.IncludedKeys, p.objectConfig.Tags...) + var allKeys = p.objectConfig.IncludedKeys + allKeys = append(allKeys, p.objectConfig.Tags...) 
for _, i := range allKeys { if i == key { return true @@ -611,25 +612,25 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string case "uint": r, err := strconv.ParseUint(inputType, 10, 64) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) + return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %w", name, err) } return r, nil case "int": r, err := strconv.ParseInt(inputType, 10, 64) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) + return nil, fmt.Errorf("Unable to convert field '%s' to type int: %w", name, err) } return r, nil case "float": r, err := strconv.ParseFloat(inputType, 64) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + return nil, fmt.Errorf("Unable to convert field '%s' to type float: %w", name, err) } return r, nil case "bool": r, err := strconv.ParseBool(inputType) if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %w", name, err) } return r, nil } @@ -659,11 +660,12 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string case "uint": return input.Uint(), nil case "bool": - if inputType == 0 { + switch inputType { + case 0: return false, nil - } else if inputType == 1 { + case 1: return true, nil - } else { + default: return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) } } From 62ea06602918bf9cf950f045523cd7a1449f91fe Mon Sep 17 00:00:00 2001 From: Will Krause Date: Fri, 10 Jun 2022 14:32:04 -0400 Subject: [PATCH 3/6] fix file initializer --- plugins/inputs/file/file.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index a0e13d43..d477d996 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -63,7 +63,10 @@ func (f *File) Description() string { func (f *File) Init() error { var err error f.decoder, err = encoding.NewDecoder(f.CharacterEncoding) - return fmt.Errorf("new decoder: %w", err) + if err != nil { + return fmt.Errorf("new decoder: %w", err) + } + return err } func (f *File) Gather(ctx context.Context, acc cua.Accumulator) error { From ef9c0e6de2c2a1612740484067475cdfb9bf3642 Mon Sep 17 00:00:00 2001 From: Will Krause Date: Fri, 10 Jun 2022 14:44:56 -0400 Subject: [PATCH 4/6] fix tests --- plugins/parsers/json_v2/parser_test.go | 210 +++++++++--------- .../wrong_path/circonus-unified-agent.conf | 50 ----- .../json_v2/testdata/wrong_path/expected.out | 0 .../json_v2/testdata/wrong_path/input.json | 3 - 4 files changed, 109 insertions(+), 154 deletions(-) delete mode 100644 plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf delete mode 100644 plugins/parsers/json_v2/testdata/wrong_path/expected.out delete mode 100644 plugins/parsers/json_v2/testdata/wrong_path/input.json diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 878c6ede..3797465e 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -1,113 +1,121 @@ package json_v2_test -// import ( -// "bufio" -// "fmt" -// "io/ioutil" -// "os" -// "strings" -// "testing" +import ( + "bufio" + "context" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" -// "github.com/circonus-labs/circonus-unified-agent/config" -// 
"github.com/circonus-labs/circonus-unified-agent/cua" -// "github.com/circonus-labs/circonus-unified-agent/plugins/inputs" -// "github.com/circonus-labs/circonus-unified-agent/plugins/inputs/file" -// "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/influx" -// "github.com/stretchr/testify/require" -// ) + "github.com/circonus-labs/circonus-unified-agent/config" + "github.com/circonus-labs/circonus-unified-agent/cua" + "github.com/circonus-labs/circonus-unified-agent/plugins/inputs" + "github.com/circonus-labs/circonus-unified-agent/plugins/inputs/file" + "github.com/circonus-labs/circonus-unified-agent/plugins/parsers/influx" + "github.com/circonus-labs/circonus-unified-agent/testutil" + "github.com/stretchr/testify/require" +) -// func TestMultipleConfigs(t *testing.T) { -// // Get all directories in testdata -// folders, err := ioutil.ReadDir("testdata") -// require.NoError(t, err) -// // Make sure testdata contains data -// require.Greater(t, len(folders), 0) +func TestMultipleConfigs(t *testing.T) { + // Get all directories in testdata + folders, err := ioutil.ReadDir("testdata") + require.NoError(t, err) + // Make sure testdata contains data + require.Greater(t, len(folders), 0) -// expectedErrors := []struct { -// Name string -// Error string -// }{ -// { -// Name: "wrong_path", -// Error: "wrong", -// }, -// } + expectedErrors := []struct { + Name string + Error string + }{ + { + Name: "wrong_path", + Error: "wrong", + }, + } -// for _, f := range folders { -// t.Run(f.Name(), func(t *testing.T) { -// // Process the telegraf config file for the test -// buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/circonus-unified-agent.conf", f.Name())) -// require.NoError(t, err) -// inputs.Add("file", func() cua.Input { -// return &file.File{} -// }) -// cfg := config.NewConfig() -// err = cfg.LoadConfigData(buf) -// require.NoError(t, err) + for i, f := range folders { + if i > 1 { + continue + } + t.Run(f.Name(), func(t *testing.T) { + // Process the telegraf config file for the test + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/circonus-unified-agent.conf", f.Name())) + require.NoError(t, err) + inputs.Add("file", func() cua.Input { + return &file.File{} + }) + cfg := config.NewConfig() + err = cfg.LoadConfigData(buf) + require.NoError(t, err) -// for _, input := range cfg.Inputs { -// err = input.Init() -// require.NoError(t, err) -// // err = input.Gather(&acc) -// // If the test has an expected error then require one was received -// var expectedError bool -// for _, e := range expectedErrors { -// if e.Name == f.Name() { -// require.Contains(t, err.Error(), e.Error) -// expectedError = true -// break -// } -// } -// if !expectedError { -// require.NoError(t, err) -// } -// } + acc := testutil.Accumulator{} + ctx := context.Background() + for _, input := range cfg.Inputs { + err = input.Init() + require.NoError(t, err) + err = input.Gather(ctx, &acc) + // If the test has an expected error then require one was received + var expectedError bool + for _, e := range expectedErrors { + if e.Name == f.Name() { + require.Contains(t, err.Error(), e.Error) + expectedError = true + break + } + } + if !expectedError { + require.NoError(t, err) + } + } -// // Process expected metrics and compare with resulting metrics -// // expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) -// require.NoError(t, err) -// // resultingMetrics := acc.GetTelegrafMetrics() -// // testutil.RequireMetricsEqual(t, expectedOutputs, resultingMetrics, 
testutil.IgnoreTime()) + // Process expected metrics and compare with resulting metrics + expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) + require.NoError(t, err) + resultingMetrics := acc.GetCUAMetrics() + testutil.RequireMetricsEqual(t, expectedOutputs, resultingMetrics, testutil.IgnoreTime()) -// // Folder with timestamp prefixed will also check for matching timestamps to make sure they are parsed correctly -// // The milliseconds weren't matching, seemed like a rounding difference between the influx parser -// // Compares each metrics times separately and ignores milliseconds -// if strings.HasPrefix(f.Name(), "timestamp") { -// // require.Equal(t, len(expectedOutputs), len(resultingMetrics)) -// // for i, m := range resultingMetrics { -// // require.Equal(t, expectedOutputs[i].Time().Truncate(time.Second), m.Time().Truncate(time.Second)) -// // } -// } -// }) -// } -// } + // Folder with timestamp prefixed will also check for matching timestamps to make sure they are parsed correctly + // The milliseconds weren't matching, seemed like a rounding difference between the influx parser + // Compares each metrics times separately and ignores milliseconds + if strings.HasPrefix(f.Name(), "timestamp") { + require.Equal(t, len(expectedOutputs), len(resultingMetrics)) + for i, m := range resultingMetrics { + require.Equal(t, expectedOutputs[i].Time().Truncate(time.Second), m.Time().Truncate(time.Second)) + } + } + }) + } +} -// func readMetricFile(path string) ([]cua.Metric, error) { -// var metrics []cua.Metric -// expectedFile, err := os.Open(path) -// if err != nil { -// return metrics, err -// } -// defer expectedFile.Close() +func readMetricFile(path string) ([]cua.Metric, error) { + var metrics []cua.Metric + expectedFile, err := os.Open(path) + if err != nil { + return metrics, err + } + defer expectedFile.Close() -// parser := influx.NewParser(influx.NewMetricHandler()) -// scanner := bufio.NewScanner(expectedFile) -// for scanner.Scan() { -// line := scanner.Text() -// if line != "" { -// m, err := parser.ParseLine(line) -// // The timezone needs to be UTC to match the timestamp test results -// m.SetTime(m.Time().UTC()) -// if err != nil { -// return nil, fmt.Errorf("unable to parse metric in %q failed: %v", line, err) -// } -// metrics = append(metrics, m) -// } -// } -// err = expectedFile.Close() -// if err != nil { -// return metrics, err -// } + parser := influx.NewParser(influx.NewMetricHandler()) + scanner := bufio.NewScanner(expectedFile) + for scanner.Scan() { + line := scanner.Text() + if line != "" { + m, err := parser.ParseLine(line) + // The timezone needs to be UTC to match the timestamp test results + m.SetTime(m.Time().UTC()) + if err != nil { + return nil, fmt.Errorf("unable to parse metric in %q failed: %v", line, err) + } + metrics = append(metrics, m) + } + } + err = expectedFile.Close() + if err != nil { + return metrics, err + } -// return metrics, nil -// } + return metrics, nil +} diff --git a/plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf b/plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf deleted file mode 100644 index 3553e815..00000000 --- a/plugins/parsers/json_v2/testdata/wrong_path/circonus-unified-agent.conf +++ /dev/null @@ -1,50 +0,0 @@ -[[inputs.file]] - instance_id = "circonus_test" - files = ["./testdata/wrong_path/input.json"] - data_format = "json_v2" - [[inputs.file.json_v2]] - [[inputs.file.json_v2.object]] - path = "wrong" - -[[inputs.file]] - 
instance_id = "circonus_test" - files = ["./testdata/wrong_path/input.json"] - data_format = "json_v2" - [[inputs.file.json_v2]] - [[inputs.file.json_v2.object]] - path = "correct" - [[inputs.file.json_v2.object.tag]] - path = "wrong" - -[[inputs.file]] - instance_id = "circonus_test" - files = ["./testdata/wrong_path/input.json"] - data_format = "json_v2" - [[inputs.file.json_v2]] - [[inputs.file.json_v2.object]] - path = "correct" - [[inputs.file.json_v2.object.field]] - path = "wrong" - -[[inputs.file]] - instance_id = "circonus_test" - files = ["./testdata/wrong_path/input.json"] - data_format = "json_v2" - [[inputs.file.json_v2]] - timestamp_path = "wrong" - -[[inputs.file]] - instance_id = "circonus_test" - files = ["./testdata/wrong_path/input.json"] - data_format = "json_v2" - [[inputs.file.json_v2]] - [[inputs.file.json_v2.tag]] - path = "wrong" - -[[inputs.file]] - instance_id = "circonus_test" - files = ["./testdata/wrong_path/input.json"] - data_format = "json_v2" - [[inputs.file.json_v2]] - [[inputs.file.json_v2.field]] - path = "wrong" \ No newline at end of file diff --git a/plugins/parsers/json_v2/testdata/wrong_path/expected.out b/plugins/parsers/json_v2/testdata/wrong_path/expected.out deleted file mode 100644 index e69de29b..00000000 diff --git a/plugins/parsers/json_v2/testdata/wrong_path/input.json b/plugins/parsers/json_v2/testdata/wrong_path/input.json deleted file mode 100644 index 74b6ec88..00000000 --- a/plugins/parsers/json_v2/testdata/wrong_path/input.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "correct": "test" -} From a2d0cadcd4eea9eb1bd767611ae60bc3ed3f1f45 Mon Sep 17 00:00:00 2001 From: Will Krause Date: Fri, 10 Jun 2022 14:47:31 -0400 Subject: [PATCH 5/6] fix golangci-lint error --- plugins/parsers/json_v2/parser_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 3797465e..a5ca5777 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -107,7 +107,7 @@ func readMetricFile(path string) ([]cua.Metric, error) { // The timezone needs to be UTC to match the timestamp test results m.SetTime(m.Time().UTC()) if err != nil { - return nil, fmt.Errorf("unable to parse metric in %q failed: %v", line, err) + return nil, fmt.Errorf("unable to parse metric in %q failed: %w", line, err) } metrics = append(metrics, m) } From 49d9b89859d5b2914cdd20efee533ee18a065f6b Mon Sep 17 00:00:00 2001 From: Will Krause Date: Fri, 10 Jun 2022 15:05:54 -0400 Subject: [PATCH 6/6] fix template init bug --- plugins/processors/template/template.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/processors/template/template.go b/plugins/processors/template/template.go index 346f1da2..490b4cb7 100644 --- a/plugins/processors/template/template.go +++ b/plugins/processors/template/template.go @@ -57,7 +57,11 @@ func (r *Processor) Init() error { t, err := template.New("configured_template").Parse(r.Template) r.tmpl = t - return fmt.Errorf("template new: %w", err) + if err != nil { + return fmt.Errorf("template new: %w", err) + } + return err + } func init() {