From a7a1dc2c150bca08b606050ae76f3c2caa8d7e9c Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Tue, 29 Sep 2020 12:54:31 -0700 Subject: [PATCH 1/5] feat(sources): add influxdb2 listsner, nsd, opcua, win_eventlog to sources page --- .../components/clientLibraries/Arduino.md | 2 +- .../components/telegrafPlugins/aerospike.md | 71 +++- .../telegrafPlugins/influxdb_v2_listener.md | 53 +++ .../components/telegrafPlugins/nsd.md | 176 +++++++++ .../components/telegrafPlugins/opcua.md | 80 ++++ .../components/telegrafPlugins/smart.md | 169 ++++++--- .../components/telegrafPlugins/snmp.md | 8 +- .../components/telegrafPlugins/snmp_trap.md | 5 +- .../components/telegrafPlugins/sqlserver.md | 172 +++++++-- .../components/telegrafPlugins/tail.md | 17 + .../telegrafPlugins/win_eventlog.md | 209 +++++++++++ .../constants/contentTelegrafPlugins.ts | 36 ++ .../graphics/influxdb_v2_listener.svg | 21 ++ src/writeData/graphics/nsd.svg | 25 ++ src/writeData/graphics/opcua.svg | 353 ++++++++++++++++++ src/writeData/graphics/win_eventlog.svg | 1 + 16 files changed, 1301 insertions(+), 97 deletions(-) create mode 100644 src/writeData/components/telegrafPlugins/influxdb_v2_listener.md create mode 100644 src/writeData/components/telegrafPlugins/nsd.md create mode 100644 src/writeData/components/telegrafPlugins/opcua.md create mode 100644 src/writeData/components/telegrafPlugins/win_eventlog.md create mode 100644 src/writeData/graphics/influxdb_v2_listener.svg create mode 100644 src/writeData/graphics/nsd.svg create mode 100644 src/writeData/graphics/opcua.svg create mode 100644 src/writeData/graphics/win_eventlog.svg diff --git a/src/writeData/components/clientLibraries/Arduino.md b/src/writeData/components/clientLibraries/Arduino.md index 26c5e8ad54..1f4d000029 100644 --- a/src/writeData/components/clientLibraries/Arduino.md +++ b/src/writeData/components/clientLibraries/Arduino.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub 
Repository](https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino) ##### Install Library diff --git a/src/writeData/components/telegrafPlugins/aerospike.md b/src/writeData/components/telegrafPlugins/aerospike.md index 56775d908b..66fbbe12ec 100644 --- a/src/writeData/components/telegrafPlugins/aerospike.md +++ b/src/writeData/components/telegrafPlugins/aerospike.md @@ -28,11 +28,37 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # tls_key = "/etc/telegraf/key.pem" ## If false, skip chain & host verification # insecure_skip_verify = true + + # Feature Options + # Add namespace variable to limit the namespaces executed on + # Leave blank to do all + # disable_query_namespaces = true # default false + # namespaces = ["namespace1", "namespace2"] + + # Enable set level telemetry + # query_sets = true # default: false + # Add namespace set combinations to limit sets executed on + # Leave blank to do all + # sets = ["namespace1/set1", "namespace1/set2"] + # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] + + # Histograms + # enable_ttl_histogram = true # default: false + # enable_object_size_linear_histogram = true # default: false + + # by default, aerospike produces a 100 bucket histogram + # this is not great for most graphing tools, this will allow + # the ability to squash this to a smaller number of buckets + # To have a balanced histogram, the number of buckets chosen + # should divide evenly into 100. + # num_histogram_buckets = 100 # default: 10 + + ``` ### Measurements: -The aerospike metrics are under two measurement names: +The aerospike metrics are under a few measurement names: ***aerospike_node***: These are the aerospike **node** measurements, which are available from the aerospike `statistics` command. 
@@ -55,6 +81,36 @@ are available from the aerospike `namespace/` command. namespace/ ... ``` +***aerospike_set***: These are aerospike set measurements, which +are available from the aerospike `sets//` command. + + ie, + ``` + telnet localhost 3003 + sets + sets/ + sets// + ... + ``` +***aerospike_histogram_ttl***: These are aerospike ttl histogram measurements, which +is available from the aerospike `histogram:namespace=;[set=;]type=ttl` command. + + ie, + ``` + telnet localhost 3003 + histogram:namespace=;type=ttl + histogram:namespace=;[set=;]type=ttl + ... + ``` +***aerospike_histogram_object_size_linear***: These are aerospike object size linear histogram measurements, which is available from the aerospike `histogram:namespace=;[set=;]type=object_size_linear` command. + + ie, + ``` + telnet localhost 3003 + histogram:namespace=;type=object_size_linear + histogram:namespace=;[set=;]type=object_size_linear + ... + ``` ### Tags: @@ -67,10 +123,23 @@ Namespace metrics have tags: - namespace_name +Set metrics have tags: + +- namespace_name +- set_name + +Histogram metrics have tags: +- namespace_name +- set_name (optional) +- type + ### Example Output: ``` % telegraf --input-filter aerospike --test > aerospike_node,aerospike_host=localhost:3000,node_name="BB9020011AC4202" 
batch_error=0i,batch_index_complete=0i,batch_index_created_buffers=0i,batch_index_destroyed_buffers=0i,batch_index_error=0i,batch_index_huge_buffers=0i,batch_index_initiate=0i,batch_index_queue="0:0,0:0,0:0,0:0",batch_index_timeout=0i,batch_index_unused_buffers=0i,batch_initiate=0i,batch_queue=0i,batch_timeout=0i,client_connections=6i,cluster_integrity=true,cluster_key="8AF422E05281249E",cluster_size=1i,delete_queue=0i,demarshal_error=0i,early_tsvc_batch_sub_error=0i,early_tsvc_client_error=0i,early_tsvc_udf_sub_error=0i,fabric_connections=16i,fabric_msgs_rcvd=0i,fabric_msgs_sent=0i,heartbeat_connections=0i,heartbeat_received_foreign=0i,heartbeat_received_self=0i,info_complete=47i,info_queue=0i,migrate_allowed=true,migrate_partitions_remaining=0i,migrate_progress_recv=0i,migrate_progress_send=0i,objects=0i,paxos_principal="BB9020011AC4202",proxy_in_progress=0i,proxy_retry=0i,query_long_running=0i,query_short_running=0i,reaped_fds=0i,record_refs=0i,rw_in_progress=0i,scans_active=0i,sindex_gc_activity_dur=0i,sindex_gc_garbage_cleaned=0i,sindex_gc_garbage_found=0i,sindex_gc_inactivity_dur=0i,sindex_gc_list_creation_time=0i,sindex_gc_list_deletion_time=0i,sindex_gc_locktimedout=0i,sindex_gc_objects_validated=0i,sindex_ucgarbage_found=0i,sub_objects=0i,system_free_mem_pct=92i,system_swapping=false,tsvc_queue=0i,uptime=1457i 1468923222000000000 > aerospike_namespace,aerospike_host=localhost:3000,namespace=test,node_name="BB9020011AC4202" 
allow_nonxdr_writes=true,allow_xdr_writes=true,available_bin_names=32768i,batch_sub_proxy_complete=0i,batch_sub_proxy_error=0i,batch_sub_proxy_timeout=0i,batch_sub_read_error=0i,batch_sub_read_not_found=0i,batch_sub_read_success=0i,batch_sub_read_timeout=0i,batch_sub_tsvc_error=0i,batch_sub_tsvc_timeout=0i,client_delete_error=0i,client_delete_not_found=0i,client_delete_success=0i,client_delete_timeout=0i,client_lang_delete_success=0i,client_lang_error=0i,client_lang_read_success=0i,client_lang_write_success=0i,client_proxy_complete=0i,client_proxy_error=0i,client_proxy_timeout=0i,client_read_error=0i,client_read_not_found=0i,client_read_success=0i,client_read_timeout=0i,client_tsvc_error=0i,client_tsvc_timeout=0i,client_udf_complete=0i,client_udf_error=0i,client_udf_timeout=0i,client_write_error=0i,client_write_success=0i,client_write_timeout=0i,cold_start_evict_ttl=4294967295i,conflict_resolution_policy="generation",current_time=206619222i,data_in_index=false,default_ttl=432000i,device_available_pct=99i,device_free_pct=100i,device_total_bytes=4294967296i,device_used_bytes=0i,disallow_null_setname=false,enable_benchmarks_batch_sub=false,enable_benchmarks_read=false,enable_benchmarks_storage=false,enable_benchmarks_udf=false,enable_benchmarks_udf_sub=false,enable_benchmarks_write=false,enable_hist_proxy=false,enable_xdr=false,evict_hist_buckets=10000i,evict_tenths_pct=5i,evict_ttl=0i,evicted_objects=0i,expired_objects=0i,fail_generation=0i,fail_key_busy=0i,fail_record_too_big=0i,fail_xdr_forbidden=0i,geo2dsphere_within.earth_radius_meters=6371000i,geo2dsphere_within.level_mod=1i,geo2dsphere_within.max_cells=12i,geo2dsphere_within.max_level=30i,geo2dsphere_within.min_level=1i,geo2dsphere_within.strict=true,geo_region_query_cells=0i,geo_region_query_falsepos=0i,geo_region_query_points=0i,geo_region_query_reqs=0i,high_water_disk_pct=50i,high_water_memory_pct=60i,hwm_breached=false,ldt_enabled=false,ldt_gc_rate=0i,ldt_page_size=8192i,master_objects=0i,master_sub_objects=
0i,max_ttl=315360000i,max_void_time=0i,memory_free_pct=100i,memory_size=1073741824i,memory_used_bytes=0i,memory_used_data_bytes=0i,memory_used_index_bytes=0i,memory_used_sindex_bytes=0i,migrate_order=5i,migrate_record_receives=0i,migrate_record_retransmits=0i,migrate_records_skipped=0i,migrate_records_transmitted=0i,migrate_rx_instances=0i,migrate_rx_partitions_active=0i,migrate_rx_partitions_initial=0i,migrate_rx_partitions_remaining=0i,migrate_sleep=1i,migrate_tx_instances=0i,migrate_tx_partitions_active=0i,migrate_tx_partitions_imbalance=0i,migrate_tx_partitions_initial=0i,migrate_tx_partitions_remaining=0i,non_expirable_objects=0i,ns_forward_xdr_writes=false,nsup_cycle_duration=0i,nsup_cycle_sleep_pct=0i,objects=0i,prole_objects=0i,prole_sub_objects=0i,query_agg=0i,query_agg_abort=0i,query_agg_avg_rec_count=0i,query_agg_error=0i,query_agg_success=0i,query_fail=0i,query_long_queue_full=0i,query_long_reqs=0i,query_lookup_abort=0i,query_lookup_avg_rec_count=0i,query_lookup_error=0i,query_lookup_success=0i,query_lookups=0i,query_reqs=0i,query_short_queue_full=0i,query_short_reqs=0i,query_udf_bg_failure=0i,query_udf_bg_success=0i,read_consistency_level_override="off",repl_factor=1i,scan_aggr_abort=0i,scan_aggr_complete=0i,scan_aggr_error=0i,scan_basic_abort=0i,scan_basic_complete=0i,scan_basic_error=0i,scan_udf_bg_abort=0i,scan_udf_bg_complete=0i,scan_udf_bg_error=0i,set_deleted_objects=0i,sets_enable_xdr=true,sindex.data_max_memory="ULONG_MAX",sindex.num_partitions=32i,single_bin=false,stop_writes=false,stop_writes_pct=90i,storage_engine="device",storage_engine.cold_start_empty=false,storage_engine.data_in_memory=true,storage_engine.defrag_lwm_pct=50i,storage_engine.defrag_queue_min=0i,storage_engine.defrag_sleep=1000i,storage_engine.defrag_startup_minimum=10i,storage_engine.disable_odirect=false,storage_engine.enable_osync=false,storage_engine.file="/opt/aerospike/data/test.dat",storage_engine.filesize=4294967296i,storage_engine.flush_max_ms=1000i,storage_engine.fs
ync_max_sec=0i,storage_engine.max_write_cache=67108864i,storage_engine.min_avail_pct=5i,storage_engine.post_write_queue=0i,storage_engine.scheduler_mode="null",storage_engine.write_block_size=1048576i,storage_engine.write_threads=1i,sub_objects=0i,udf_sub_lang_delete_success=0i,udf_sub_lang_error=0i,udf_sub_lang_read_success=0i,udf_sub_lang_write_success=0i,udf_sub_tsvc_error=0i,udf_sub_tsvc_timeout=0i,udf_sub_udf_complete=0i,udf_sub_udf_error=0i,udf_sub_udf_timeout=0i,write_commit_level_override="off",xdr_write_error=0i,xdr_write_success=0i,xdr_write_timeout=0i,{test}_query_hist_track_back=300i,{test}_query_hist_track_slice=10i,{test}_query_hist_track_thresholds="1,8,64",{test}_read_hist_track_back=300i,{test}_read_hist_track_slice=10i,{test}_read_hist_track_thresholds="1,8,64",{test}_udf_hist_track_back=300i,{test}_udf_hist_track_slice=10i,{test}_udf_hist_track_thresholds="1,8,64",{test}_write_hist_track_back=300i,{test}_write_hist_track_slice=10i,{test}_write_hist_track_thresholds="1,8,64" 1468923222000000000 +> aerospike_set,aerospike_host=localhost:3000,node_name=BB99458B42826B0,set=test/test disable_eviction=false,memory_data_bytes=0i,objects=0i,set_enable_xdr="use-default",stop_writes_count=0i,tombstones=0i,truncate_lut=0i 1598033805000000000 +>> aerospike_histogram_ttl,aerospike_host=localhost:3000,namespace=test,node_name=BB98EE5B42826B0,set=test 0=0i,1=0i,10=0i,11=0i,12=0i,13=0i,14=0i,15=0i,16=0i,17=0i,18=0i,19=0i,2=0i,20=0i,21=0i,22=0i,23=0i,24=0i,25=0i,26=0i,27=0i,28=0i,29=0i,3=0i,30=0i,31=0i,32=0i,33=0i,34=0i,35=0i,36=0i,37=0i,38=0i,39=0i,4=0i,40=0i,41=0i,42=0i,43=0i,44=0i,45=0i,46=0i,47=0i,48=0i,49=0i,5=0i,50=0i,51=0i,52=0i,53=0i,54=0i,55=0i,56=0i,57=0i,58=0i,59=0i,6=0i,60=0i,61=0i,62=0i,63=0i,64=0i,65=0i,66=0i,67=0i,68=0i,69=0i,7=0i,70=0i,71=0i,72=0i,73=0i,74=0i,75=0i,76=0i,77=0i,78=0i,79=0i,8=0i,80=0i,81=0i,82=0i,83=0i,84=0i,85=0i,86=0i,87=0i,88=0i,89=0i,9=0i,90=0i,91=0i,92=0i,93=0i,94=0i,95=0i,96=0i,97=0i,98=0i,99=0i 1598034191000000000 + ``` diff 
--git a/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md b/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md new file mode 100644 index 0000000000..cef11e9ae3 --- /dev/null +++ b/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md @@ -0,0 +1,53 @@ +# InfluxDB V2 Listener Input Plugin + +InfluxDB V2 Listener is a service input plugin that listens for requests sent +according to the [InfluxDB HTTP API][influxdb_http_api]. The intent of the +plugin is to allow Telegraf to serve as a proxy/router for the `/api/v2/write` +endpoint of the InfluxDB HTTP API. + +The `/api/v2/write` endpoint supports the `precision` query parameter and can be set +to one of `ns`, `us`, `ms`, `s`. All other parameters are ignored and +defer to the output plugins configuration. + +### Configuration: + +```toml +[[inputs.influxdb_v2_listener]] + ## Address and port to host InfluxDB listener on + service_address = ":9999" + + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. + # max_body_size = "32MiB" + + ## Optional tag to determine the bucket. + ## If the write has a bucket in the query string then it will be kept in this tag name. + ## This tag can be used in downstream outputs. + ## The default value of nothing means it will be off and the database will not be recorded. + # bucket_tag = "" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Optional token to accept for HTTP authentication. + ## You probably want to make sure you have TLS configured above for this. + # token = "some-long-shared-secret-token" +``` + +### Metrics: + +Metrics are created from InfluxDB Line Protocol in the request body. 
+ +### Troubleshooting: + +**Example Query:** +``` +curl -i -XPOST 'http://localhost:8186/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` + +[influxdb_http_api]: https://v2.docs.influxdata.com/v2.0/api/ diff --git a/src/writeData/components/telegrafPlugins/nsd.md b/src/writeData/components/telegrafPlugins/nsd.md new file mode 100644 index 0000000000..2d7f8833c2 --- /dev/null +++ b/src/writeData/components/telegrafPlugins/nsd.md @@ -0,0 +1,176 @@ +# NSD Input Plugin + +This plugin gathers stats from +[NSD](https://www.nlnetlabs.nl/projects/nsd/about) - an authoritative DNS name +server. + +### Configuration: + +```toml +# A plugin to collect stats from the NSD DNS resolver +[[inputs.nsd]] + ## Address of server to connect to, optionally ':port'. Defaults to the + ## address in the nsd config file. + server = "127.0.0.1:8953" + + ## If running as a restricted user you can prepend sudo for additional access: + # use_sudo = false + + ## The default location of the nsd-control binary can be overridden with: + # binary = "/usr/sbin/nsd-control" + + ## The default location of the nsd config file can be overridden with: + # config_file = "/etc/nsd/nsd.conf" + + ## The default timeout of 1s can be overridden with: + # timeout = "1s" +``` + +#### Permissions: + +It's important to note that this plugin references nsd-control, which may +require additional permissions to execute successfully. Depending on the +user/group permissions of the telegraf user executing this plugin, you may +need to alter the group membership, set facls, or use sudo. 
+ +**Group membership (Recommended)**: +```bash +$ groups telegraf +telegraf : telegraf + +$ usermod -a -G nsd telegraf + +$ groups telegraf +telegraf : telegraf nsd +``` + +**Sudo privileges**: +If you use this method, you will need the following in your telegraf config: +```toml +[[inputs.nsd]] + use_sudo = true +``` + +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +Cmnd_Alias NSDCONTROLCTL = /usr/sbin/nsd-control +telegraf ALL=(ALL) NOPASSWD: NSDCONTROLCTL +Defaults!NSDCONTROLCTL !logfile, !syslog, !pam_session +``` + +Please use the solution you see as most appropriate. + +### Metrics: + +This is the full list of stats provided by nsd-control. In the output, the +dots in the nsd-control stat name are replaced by underscores (see +https://www.nlnetlabs.nl/documentation/nsd/nsd-control/ for details). + +- nsd + - fields: + - num_queries + - time_boot + - time_elapsed + - size_db_disk + - size_db_mem + - size_xfrd_mem + - size_config_disk + - size_config_mem + - num_type_TYPE0 + - num_type_A + - num_type_NS + - num_type_MD + - num_type_MF + - num_type_CNAME + - num_type_SOA + - num_type_MB + - num_type_MG + - num_type_MR + - num_type_NULL + - num_type_WKS + - num_type_PTR + - num_type_HINFO + - num_type_MINFO + - num_type_MX + - num_type_TXT + - num_type_RP + - num_type_AFSDB + - num_type_X25 + - num_type_ISDN + - num_type_RT + - num_type_NSAP + - num_type_SIG + - num_type_KEY + - num_type_PX + - num_type_AAAA + - num_type_LOC + - num_type_NXT + - num_type_SRV + - num_type_NAPTR + - num_type_KX + - num_type_CERT + - num_type_DNAME + - num_type_OPT + - num_type_APL + - num_type_DS + - num_type_SSHFP + - num_type_IPSECKEY + - num_type_RRSIG + - num_type_NSEC + - num_type_DNSKEY + - num_type_DHCID + - num_type_NSEC3 + - num_type_NSEC3PARAM + - num_type_TLSA + - num_type_SMIMEA + - num_type_CDS + - num_type_CDNSKEY + - num_type_OPENPGPKEY + - num_type_CSYNC + - num_type_SPF + - num_type_NID + - num_type_L32 + - 
num_type_L64 + - num_type_LP + - num_type_EUI48 + - num_type_EUI64 + - num_type_TYPE252 + - num_type_TYPE253 + - num_type_TYPE255 + - num_opcode_QUERY + - num_opcode_NOTIFY + - num_class_CLASS0 + - num_class_IN + - num_class_CH + - num_rcode_NOERROR + - num_rcode_FORMERR + - num_rcode_SERVFAIL + - num_rcode_NXDOMAIN + - num_rcode_NOTIMP + - num_rcode_REFUSED + - num_rcode_YXDOMAIN + - num_rcode_NOTAUTH + - num_edns + - num_ednserr + - num_udp + - num_udp6 + - num_tcp + - num_tcp6 + - num_tls + - num_tls6 + - num_answer_wo_aa + - num_rxerr + - num_txerr + - num_raxfr + - num_truncated + - num_dropped + - zone_master + - zone_slave + +- nsd_servers + - tags: + - server + - fields: + - queries diff --git a/src/writeData/components/telegrafPlugins/opcua.md b/src/writeData/components/telegrafPlugins/opcua.md new file mode 100644 index 0000000000..173d98b6fa --- /dev/null +++ b/src/writeData/components/telegrafPlugins/opcua.md @@ -0,0 +1,80 @@ +# OPC UA Client Input Plugin + +The `opcua` plugin retrieves data from OPC UA client devices. + +Telegraf minimum version: Telegraf 1.16 +Plugin minimum tested version: 1.16 + +### Configuration: + +```toml +[[inputs.opcua]] + ## Device name + # name = "localhost" + # + ## OPC UA Endpoint URL + # endpoint = "opc.tcp://localhost:4840" + # + ## Maximum time allowed to establish a connect to the endpoint. + # connect_timeout = "10s" + # + ## Maximum time allowed for a request over the estabilished connection. + # request_timeout = "5s" + # + ## Security policy, one of "None", "Basic128Rsa15", "Basic256", + ## "Basic256Sha256", or "auto" + # security_policy = "auto" + # + ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" + # security_mode = "auto" + # + ## Path to cert.pem. Required when security mode or policy isn't "None". + ## If cert path is not supplied, self-signed cert and key will be generated. + # certificate = "/etc/telegraf/cert.pem" + # + ## Path to private key.pem. 
Required when security mode or policy isn't "None". + ## If key path is not supplied, self-signed cert and key will be generated. + # private_key = "/etc/telegraf/key.pem" + # + ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To + ## authenticate using a specific ID, select 'Certificate' or 'UserName' + # auth_method = "Anonymous" + # + ## Username. Required for auth_method = "UserName" + # username = "" + # + ## Password. Required for auth_method = "UserName" + # password = "" + # + ## Node ID configuration + ## name - the variable name + ## namespace - integer value 0 thru 3 + ## identifier_type - s=string, i=numeric, g=guid, b=opaque + ## identifier - tag as shown in opcua browser + ## data_type - boolean, byte, short, int, uint, uint16, int16, + ## uint32, int32, float, double, string, datetime, number + ## Example: + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} + nodes = [ + {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, + {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, + ] +``` + +### Example Node Configuration +An OPC UA node ID may resemble: "n=3,s=Temperature". In this example: +- n=3 is indicating the `namespace` is 3 +- s=Temperature is indicating that the `identifier_type` is a string and `identifier` value is 'Temperature' +- This example temperature node has a value of 79.0, which makes the `data_type` a 'float'. 
+To gather data from this node enter the following line into the 'nodes' property above: +``` +{name="LabelName", namespace="3", identifier_type="s", identifier="Temperature", data_type="float", description="Description of node"}, +``` + + +### Example Output + +``` +opcua,host=3c70aee0901e,name=Random,type=double Random=0.018158170305814902 1597820490000000000 + +``` diff --git a/src/writeData/components/telegrafPlugins/smart.md b/src/writeData/components/telegrafPlugins/smart.md index 47320aeac2..d26ebc9678 100644 --- a/src/writeData/components/telegrafPlugins/smart.md +++ b/src/writeData/components/telegrafPlugins/smart.md @@ -1,75 +1,116 @@ # S.M.A.R.T. Input Plugin -Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs)[1] that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. +Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs) that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. See smartmontools (https://www.smartmontools.org/). SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration. 
If no devices are specified, the plugin will scan for SMART devices via the following command: -``` +```bash smartctl --scan ``` Metrics will be reported from the following `smartctl` command: -``` +```bash smartctl --info --attributes --health -n --format=brief ``` This plugin supports _smartmontools_ version 5.41 and above, but v. 5.41 and v. 5.42 might require setting `nocheck`, see the comment in the sample configuration. +Also, NVMe capabilities were introduced in version 6.5. To enable SMART on a storage device run: -``` +```bash smartctl -s on ``` +## NVMe vendor specific attributes -### Configuration +For NVMe disk type, plugin can use command line utility `nvme-cli`. It has a feature +to easily access vendor specific attributes. +This plugin supports nvme-cli version 1.5 and above (https://github.com/linux-nvme/nvme-cli). +In case of `nvme-cli` absence NVMe vendor specific metrics will not be obtained. -```toml -# Read metrics from storage devices supporting S.M.A.R.T. -[[inputs.smart]] - ## Optionally specify the path to the smartctl executable - # path = "/usr/bin/smartctl" +Vendor specific SMART metrics for NVMe disks may be reported from the following `nvme` command: - ## On most platforms smartctl requires root access. - ## Setting 'use_sudo' to true will make use of sudo to run smartctl. - ## Sudo must be configured to to allow the telegraf user to run smartctl - ## without a password. - # use_sudo = false - - ## Skip checking disks in this power mode. Defaults to - ## "standby" to not wake up disks that have stoped rotating. - ## See --nocheck in the man pages for smartctl. - ## smartctl version 5.41 and 5.42 have faulty detection of - ## power mode and might require changing this value to - ## "never" depending on your disks. - # nocheck = "standby" +```bash +nvme smart-log-add +``` - ## Gather all returned S.M.A.R.T. attribute metrics and the detailed - ## information from each drive into the `smart_attribute` measurement. 
- # attributes = false +Note that vendor plugins for `nvme-cli` could require different naming convention and report format. - ## Optionally specify devices to exclude from reporting. - # excludes = [ "/dev/pass6" ] +To see installed plugin extensions, depended on the nvme-cli version, look at the bottom of: +```bash +nvme help +``` - ## Optionally specify devices and device type, if unset - ## a scan (smartctl --scan) for S.M.A.R.T. devices will - ## done and all found will be included except for the - ## excluded in excludes. - # devices = [ "/dev/ada0 -d atacam" ] +To gather disk vendor id (vid) `id-ctrl` could be used: +```bash +nvme id-ctrl +``` +Association between a vid and company can be found there: https://pcisig.com/membership/member-companies. - ## Timeout for the smartctl command to complete. - # timeout = "30s" +Devices affiliation to being NVMe or non NVMe will be determined thanks to: +```bash +smartctl --scan +``` +and: +```bash +smartctl --scan -d nvme ``` -### Permissions +## Configuration -It's important to note that this plugin references smartctl, which may require additional permissions to execute successfully. -Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. +```toml +# Read metrics from storage devices supporting S.M.A.R.T. +[[inputs.smart]] + ## Optionally specify the path to the smartctl executable + # path_smartctl = "/usr/bin/smartctl" + + ## Optionally specify the path to the nvme-cli executable + # path_nvme = "/usr/bin/nvme" + + ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case + ## ["auto-on"] - automatically find and enable additional vendor specific disk info + ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info + # enable_extensions = ["auto-on"] + + ## On most platforms used cli utilities requires root access. 
+ ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. + ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli + ## without a password. + # use_sudo = false + + ## Skip checking disks in this power mode. Defaults to + ## "standby" to not wake up disks that have stopped rotating. + ## See --nocheck in the man pages for smartctl. + ## smartctl version 5.41 and 5.42 have faulty detection of + ## power mode and might require changing this value to + ## "never" depending on your disks. + # nocheck = "standby" + + ## Gather all returned S.M.A.R.T. attribute metrics and the detailed + ## information from each drive into the 'smart_attribute' measurement. + # attributes = false + + ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. + # excludes = [ "/dev/pass6" ] + + ## Optionally specify devices and device type, if unset + ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done + ## and all found will be included except for the excluded in excludes. + # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] + + ## Timeout for the cli command to complete. + # timeout = "30s" +``` +## Permissions + +It's important to note that this plugin references smartctl and nvme-cli, which may require additional permissions to execute successfully. +Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. 
You will need the following in your telegraf config: ```toml @@ -80,13 +121,20 @@ You will need the following in your telegraf config: You will also need to update your sudoers file: ```bash $ visudo -# Add the following line: +# For smartctl add the following lines: Cmnd_Alias SMARTCTL = /usr/bin/smartctl telegraf ALL=(ALL) NOPASSWD: SMARTCTL Defaults!SMARTCTL !logfile, !syslog, !pam_session + +# For nvme-cli add the following lines: +Cmnd_Alias NVME = /path/to/nvme +telegraf ALL=(ALL) NOPASSWD: NVME +Defaults!NVME !logfile, !syslog, !pam_session ``` +To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartctl` or +`path_nvme` in the configuration should be set to execute this script. -### Metrics +## Metrics - smart_device: - tags: @@ -135,37 +183,44 @@ The interpretation of the tag `flags` is: #### Exit Status -The `exit_status` field captures the exit status of the smartctl command which +The `exit_status` field captures the exit status of the used cli utilities command which is defined by a bitmask. For the interpretation of the bitmask see the man page for -smartctl. - -#### Device Names +smartctl or nvme-cli. +## Device Names Device names, e.g., `/dev/sda`, are *not persistent*, and may be -subject to change across reboots or system changes. Instead, you can the +subject to change across reboots or system changes. Instead, you can use the *World Wide Name* (WWN) or serial number to identify devices. On Linux block devices can be referenced by the WWN in the following location: `/dev/disk/by-id/`. - -To run `smartctl` with `sudo` create a wrapper script and use `path` in -the configuration to execute that. - -### Troubleshooting +## Troubleshooting +If you expect to see more SMART metrics than this plugin shows, be sure to use a proper version +of smartctl or nvme-cli utility which has the functionality to gather desired data. Also, check +your device capability because not every SMART metrics are mandatory. 
For example the number of temperature sensors depends on the device specification. If this plugin is not working as expected for your SMART enabled device, please run these commands and include the output in a bug report: -``` + +For non NVMe devices (from smartctl version >= 7.0 this will also return NVMe devices by default): +```bash smartctl --scan ``` - -Run the following command replacing your configuration setting for NOCHECK and -the DEVICE from the previous command: +For NVMe devices: +```bash +smartctl --scan -d nvme ``` +Run the following command replacing your configuration setting for NOCHECK and +the DEVICE (name of the device could be taken from the previous command): +```bash smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE ``` - -### Example Output - +If you try to gather vendor specific metrics, please provide this command +and replace vendor and device to match your case: +``` +nvme VENDOR smart-log-add DEVICE +``` +## Example SMART Plugin Outputs ``` smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000 diff --git a/src/writeData/components/telegrafPlugins/snmp.md b/src/writeData/components/telegrafPlugins/snmp.md index 4e9ce8e50d..0d2eb52ab4 100644 --- a/src/writeData/components/telegrafPlugins/snmp.md +++ b/src/writeData/components/telegrafPlugins/snmp.md @@ -130,9 +130,11 @@ formed with this option operate similarly way to the `snmptable` command. 
Control the handling of specific table columns using a nested `field`. These nested fields are specified similarly to a top-level `field`. -All columns of the SNMP table will be collected, it is not required to add a -nested field for each column, only those which you wish to modify. To exclude -columns use [metric filtering][]. +By default all columns of the SNMP table will be collected - it is not required +to add a nested field for each column, only those which you wish to modify. To +*only* collect certain columns, omit the `oid` from the `table` section and only +include `oid` settings in `field` sections. For more complex include/exclude +cases for columns use [metric filtering][]. One [metric][] is created for each row of the SNMP table. diff --git a/src/writeData/components/telegrafPlugins/snmp_trap.md b/src/writeData/components/telegrafPlugins/snmp_trap.md index 046f18e498..0680376c40 100644 --- a/src/writeData/components/telegrafPlugins/snmp_trap.md +++ b/src/writeData/components/telegrafPlugins/snmp_trap.md @@ -87,6 +87,7 @@ On Mac OS, listening on privileged ports is unrestricted on versions - version (string, "1" or "2c" or "3") - context_name (string, value from v3 trap) - engine_id (string, value from v3 trap) + - community (string, value from 1 or 2c trap) - fields: - Fields are mapped from variables in the trap. Field names are the trap variable names after MIB lookup. 
Field values are trap @@ -94,8 +95,8 @@ On Mac OS, listening on privileged ports is unrestricted on versions ### Example Output ``` -snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 -snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 +snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c,community=public snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 +snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c,community=public sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 ``` [net-snmp]: http://www.net-snmp.org/ diff --git a/src/writeData/components/telegrafPlugins/sqlserver.md b/src/writeData/components/telegrafPlugins/sqlserver.md index 320fee2755..7f7887769d 100644 --- a/src/writeData/components/telegrafPlugins/sqlserver.md +++ b/src/writeData/components/telegrafPlugins/sqlserver.md @@ -1,12 +1,18 @@ # SQL Server Input Plugin - The `sqlserver` plugin provides metrics for your SQL Server instance. It currently works with SQL Server 2008 SP3 and newer. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. 
+### The SQL Server plugin supports the following editions/versions of SQL Server +- SQL Server + - 2008 SP3 (with CU3) + - SQL Server 2008 R2 SP3 and newer versions +- Azure SQL Database (Single) +- Azure SQL Managed Instance + ### Additional Setup: -You have to create a login on every instance you want to monitor, with following script: +You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: ```sql USE master; GO @@ -46,17 +52,62 @@ GO # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # ] + ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 + ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. + ## Possible values for database_type are + ## "AzureSQLDB" + ## "SQLServer" + ## "AzureSQLManagedInstance" + # database_type = "AzureSQLDB" + ## Optional parameter, setting this to 2 will use a new version - ## of the collection queries that break compatibility with the original - ## dashboards. - ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB - query_version = 2 + ## of the collection queries that break compatibility with the original dashboards. + ## Version 2 - is compatible from SQL Server 2008 Sp3 and later versions and also for SQL Azure DB + ## Version 2 is in the process of being deprecated, please consider using database_type. 
+ # query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false - ## Possible queries - ## Version 2: + ## Possible queries accross different versions of the collectors + ## Queries enabled by default for specific Database Type + + ## database_type = AzureSQLDB by default collects the following queries + ## - AzureSQLDBWaitStats + ## - AzureSQLDBResourceStats + ## - AzureSQLDBResourceGovernance + ## - AzureSQLDBDatabaseIO + ## - AzureSQLDBServerProperties + ## - AzureSQLDBSQLOsWaitstats + ## - AzureSQLDBMemoryClerks + ## - AzureSQLDBPerformanceCounters + ## - AzureSQLDBRequests + ## - AzureSQLDBSchedulers + + ## database_type = AzureSQLManagedInstance by default collects the following queries + ## - AzureSQLMIResourceStats + ## - AzureSQLMIResourceGovernance + ## - AzureSQLMIDatabaseIO + ## - AzureSQLMIServerProperties + ## - AzureSQLMIOsWaitstats + ## - AzureSQLMIMemoryClerks + ## - AzureSQLMIPerformanceCounters + ## - AzureSQLMIDBRequests + ## - AzureSQLMISchedulers + + ## database_type = SQLServer by default collects the following queries + ## - SQLServerPerformanceCounters + ## - SQLServerWaitStatsCategorized + ## - SQLServerDatabaseIO + ## - SQLServerProperties + ## - SQLServerMemoryClerks + ## - SQLServerSchedulers + ## - SQLServerRequests + ## - SQLServerVolumeSpace + ## - SQLServerCpu + + ## Version 2 by default collects the following queries + ## Version 2 is being deprecated, please consider using database_type. ## - PerformanceCounters ## - WaitStatsCategorized ## - DatabaseIO @@ -66,7 +117,9 @@ GO ## - SqlRequests ## - VolumeSpace ## - Cpu - ## Version 1: + + ## Version 1 by default collects the following queries + ## Version 1 is deprecated, please consider using database_type. ## - PerformanceCounters ## - WaitStatsCategorized ## - CPUHistory @@ -78,11 +131,16 @@ GO ## - VolumeSpace ## - PerformanceMetrics + + ## A list of queries to include. 
If not specified, all the above listed queries are used. # include_query = [] ## A list of queries to explicitly ignore. exclude_query = [ 'Schedulers' , 'SqlRequests' ] + + + ``` ### Metrics: @@ -90,7 +148,7 @@ To provide backwards compatibility, this plugin support two versions of metrics **Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. -#### Version 1 (deprecated in 1.6): +#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type. The original metrics queries provide: - *Performance counters*: 1000+ metrics from `sys.dm_os_performance_counters` - *Performance metrics*: special performance and ratio metrics @@ -107,7 +165,7 @@ If you are using the original queries all stats have the following tags: - `servername`: hostname:instance - `type`: type of stats to easily filter measurements -#### Version 2: +#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type. The new (version 2) metrics provide: - *Database IO*: IO stats from `sys.dm_io_virtual_file_stats` - *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. @@ -120,34 +178,82 @@ The new (version 2) metrics provide: - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more - *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. 
- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. -- *Schedulers* - This captures sys.dm_os_schedulers. -- *SqlRequests* - This captures a snapshot of dm_exec_requests and - dm_exec_sessions that gives you running requests as well as wait types and +- *Schedulers* - This captures `sys.dm_os_schedulers`. +- *SqlRequests* - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and blocking sessions. -- *VolumeSpace* - uses sys.dm_os_volume_stats to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. -- *Cpu* - uses the buffer ring (sys.dm_os_ring_buffers) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). +- *VolumeSpace* - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. +- *Cpu* - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). In order to allow tracking on a per statement basis this query produces a unique tag for each query. Depending on the database workload, this may result in a high cardinality series. Reference the FAQ for tips on [managing series cardinality][cardinality]. 
+ - *Azure Managed Instances* - - Stats from `sys.server_resource_stats`: - - cpu_count - - server_memory - - sku - - engine_edition - - hardware_type - - total_storage_mb - - available_storage_mb - - uptime - - Resource governance stats from sys.dm_instance_resource_governance -- *Azure SQL Database* - - Stats from sys.dm_db_wait_stats - - Resource governance stats from sys.dm_user_db_resource_governance - - Stats from sys.dm_db_resource_stats - -The following metrics can be used directly, with no delta calculations: + - Stats from `sys.server_resource_stats` + - Resource governance stats from `sys.dm_instance_resource_governance` +- *Azure SQL Database* in addition to other stats + - Stats from `sys.dm_db_wait_stats` + - Resource governance stats from `sys.dm_user_db_resource_governance` + - Stats from `sys.dm_db_resource_stats` + + + +#### database_type = "AzureSQLDB" +These are metrics for Azure SQL Database (single database) and are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs: +- AzureSQLDBDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. +- AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- AzureSQLDBResourceGovernance: Relevant properties indicating resource limits from `sys.dm_user_db_resource_governance` +- AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. +- AzureSQLDBServerProperties: Relevant Azure SQL properties such as Tier, #Vcores, Memory etc, storage, etc. +- AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
These waits are collected only as of the end of a statement, and for a specific database only. +- *AzureSQLOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide +- *AzureSQLDBRequests*: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` +- *AzureSQLDBSchedulers* - This captures `sys.dm_os_schedulers` snapshots. + + +#### database_type = "AzureSQLManagedInstance" +These are metrics for Azure SQL Managed Instance and are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs: +- AzureSQLMIDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. +- AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- AzureSQLMIResourceGovernance: Relevant properties indicating resource limits from `sys.dm_instance_resource_governance` +- AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. +- AzureSQLMIServerProperties: Relevant Azure SQL properties such as Tier, #Vcores, Memory etc, storage, etc. +- AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide +- AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` +- AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots.
+ +#### database_type = "SQLServer" +- SQLServerDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` +- SQLServerMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. +- SQLServerPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: + - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more + - *Availability Groups*: Bytes sent to replica, Bytes received from replica, Log bytes received, Log send queue, transaction delay, + more + - *Log activity*: Log bytes flushed/sec, Log flushes/sec, Log Flush Wait Time + - *Memory*: PLE, Page reads/sec, Page writes/sec, + more + - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more + - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more +- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. +- SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. +- SQLServerSchedulers - This captures `sys.dm_os_schedulers`. +- SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and + blocking sessions. +- SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance).
It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. +- SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). + + +#### Output Measures +The guiding principle is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type. +`sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats` +`sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats +`sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties, AzureSQLMIServerProperties,ServerProperties +`sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk +`sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters +`sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers + + + +The following Performance counter metrics can be used directly, with no delta calculations: - SQLServer:Buffer Manager\Buffer cache hit ratio - SQLServer:Buffer Manager\Page life expectancy - SQLServer:Buffer Node\Page life expectancy @@ -185,6 +291,6 @@ The following metrics can be used directly, with no delta calculations: Version 2 queries have the following tags: - `sql_instance`: Physical host and instance name (hostname:instance) -- database_name: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. +- `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct.
[cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality diff --git a/src/writeData/components/telegrafPlugins/tail.md b/src/writeData/components/telegrafPlugins/tail.md index 1be8a5e93a..7f5315038a 100644 --- a/src/writeData/components/telegrafPlugins/tail.md +++ b/src/writeData/components/telegrafPlugins/tail.md @@ -62,6 +62,23 @@ The plugin expects messages in one of the ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## multiline parser/codec + ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html + #[inputs.tail.multiline] + ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data. + #pattern = "^\s" + + ## The field's value must be previous or next and indicates the relation to the + ## multi-line event. + #match_which_line = "previous" + + ## The invert_match can be true or false (defaults to false). + ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true) + #invert_match = false + + #After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s. + #timeout = 5s ``` ### Metrics diff --git a/src/writeData/components/telegrafPlugins/win_eventlog.md b/src/writeData/components/telegrafPlugins/win_eventlog.md new file mode 100644 index 0000000000..5551d22b41 --- /dev/null +++ b/src/writeData/components/telegrafPlugins/win_eventlog.md @@ -0,0 +1,209 @@ +# Windows Eventlog Input Plugin + +## Collect Windows Event Log messages + +Supports Windows Vista and higher. + +Telegraf should have Administrator permissions to subscribe for some of the Windows Events Channels, like System Log. 
+ +### Configuration + +```toml + ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels + ## (System log, for example) + + ## LCID (Locale ID) for event rendering + ## 1033 to force English language + ## 0 to use default Windows locale + # locale = 0 + + ## Name of eventlog, used only if xpath_query is empty + ## Example: "Application" + # eventlog_name = "" + + ## xpath_query can be in defined short form like "Event/System[EventID=999]" + ## or you can form a XML Query. Refer to the Consuming Events article: + ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events + ## XML query is the recommended form, because it is most flexible + ## You can create or debug XML Query by creating Custom View in Windows Event Viewer + ## and then copying resulting XML here + xpath_query = ''' + + + + *[System[( (EventID >= 5152 and EventID <= 5158) or EventID=5379 or EventID=4672)]] + + + + + + + + + + + + + + + ''' + + ## System field names: + ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated", + ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName", + ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText" + + ## In addition to System, Data fields can be unrolled from additional XML nodes in event. + ## Human-readable representation of those nodes is formatted into event Message field, + ## but XML is more machine-parsable + + # Process UserData XML to fields, if this node exists in Event XML + process_userdata = true + + # Process EventData XML to fields, if this node exists in Event XML + process_eventdata = true + + ## Separator character to use for unrolled XML Data field names + separator = "_" + + ## Get only first line of Message field. For most events first line is usually more than enough + only_first_line_of_message = true + + ## Fields to include as tags. 
Globbing supported ("Level*" for both "Level" and "LevelText") + event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"] + + ## Default list of fields to send. All fields are sent by default. Globbing supported + event_fields = ["*"] + + ## Fields to exclude. Also applied to data fields. Globbing supported + exclude_fields = ["Binary", "Data_Address*"] + + ## Skip those tags or fields if their value is empty or equals to zero. Globbing supported + exclude_empty = ["*ActivityID", "UserID"] +``` + +### Filtering + +There are three types of filtering: **Event Log** name, **XPath Query** and **XML Query**. + +**Event Log** name filtering is simple: + +```toml + eventlog_name = "Application" + xpath_query = ''' +``` + +For **XPath Query** filtering set the `xpath_query` value, and `eventlog_name` will be ignored: + +```toml + eventlog_name = "" + xpath_query = "Event/System[EventID=999]" +``` + +**XML Query** is the most flexible: you can Select or Suppress any values, and give ranges for other values. XML query is the recommended form, because it is most flexible. You can create or debug XML Query by creating Custom View in Windows Event Viewer and then copying resulting XML in config file. + +XML Query documentation: + + + +### Metrics + +You can send any field, *System*, *Computed* or *XML* as tag field. List of those fields is in the `event_tags` config array. Globbing is supported in this array, i.e. `Level*` for all fields beginning with `Level`, or `L?vel` for all fields where the name is `Level`, `L3vel`, `L@vel` and so on. Tag fields are converted to strings automatically. + +By default, all other fields are sent, but you can limit that either by listing it in `event_fields` config array with globbing, or by adding some field name masks in the `exclude_fields` config array. 
+ +You can limit sending fields with empty values by adding masks of names of such fields in the `exclude_empty` config array. Value considered empty, if the System field of type `int` or `uint32` is equal to zero, or if any field of type `string` is an empty string. + +List of System fields: + +- Source (string) +- EventID (int) +- Version (int) +- Level (int) +- LevelText (string) +- Opcode (int) +- OpcodeText (string) +- Task (int) +- TaskText (string) +- Keywords (string): comma-separated in case of multiple values +- TimeCreated (string) +- EventRecordID (string) +- ActivityID (string) +- RelatedActivityID (string) +- ProcessID (int) +- ThreadID (int) +- ProcessName (string): derived from ProcessID +- Channel (string) +- Computer (string): useful if consumed from Forwarded Events +- UserID (string): SID +- UserName (string): derived from UserID, presented in form of DOMAIN\Username +- Message (string) + +### Computed fields + +Fields `Level`, `Opcode` and `Task` are converted to text and saved as computed `*Text` fields. + +`Keywords` field is converted from hex uint64 value by the `_EvtFormatMessage` WINAPI function. There can be more than one value, in that case they will be comma-separated. If keywords can't be converted (bad device driver or forwarded from another computer with unknown Event Channel), hex uint64 is saved as is. + +`ProcessName` field is found by looking up ProcessID. Can be empty if telegraf doesn't have enough permissions. + +`Username` field is found by looking up SID from UserID. + +`Message` field is rendered from the event data, and can be several kilobytes of text with line breaks. For most events the first line of this text is more then enough, and additional info is more useful to be parsed as XML fields. So, for brevity, plugin takes only the first line. You can set `only_first_line_of_message` parameter to `false` to take full message text. 
+ +### Additional Fields + +The content of **Event Data** and **User Data** XML Nodes can be added as additional fields, and is added by default. You can disable that by setting `process_userdata` or `process_eventdata` parameters to `false`. + +For the fields from additional XML Nodes the `Name` attribute is taken as the name, and inner text is the value. Type of those fields is always string. + +Name of the field is formed from XML Path by adding _ inbetween levels. For example, if UserData XML looks like this: + +```xml + + + KB4566782 + 5112 + Installed + 0x0 + UpdateAgentLCU + + +``` + +It will be converted to following fields: + +```text +CbsPackageChangeState_PackageIdentifier = "KB4566782" +CbsPackageChangeState_IntendedPackageState = "5112" +CbsPackageChangeState_IntendedPackageStateTextized = "Installed" +CbsPackageChangeState_ErrorCode = "0x0" +CbsPackageChangeState_Client = "UpdateAgentLCU" +``` + +If there are more than one field with the same name, all those fields are given suffix with number: `_1`, `_2` and so on. + +### Localization + +Human readable Event Description is in the Message field. But it is better to be skipped in favour of the Event XML values, because they are more machine-readable. + +Keywords, LevelText, TaskText, OpcodeText and Message are saved with the current Windows locale by default. You can override this, for example, to English locale by setting `locale` config parameter to `1033`. Unfortunately, **Event Data** and **User Data** XML Nodes are in default Windows locale only. + +Locale should be present on the computer. English locale is usually available on all localized versions of modern Windows. List of locales: + + + +### Example Output + +Some values are changed for anonymity. 
+ +```text +win_eventlog,Channel=System,Computer=PC,EventID=105,Keywords=0x8000000000000000,Level=4,LevelText=Information,Opcode=10,OpcodeText=General,Source=WudfUsbccidDriver,Task=1,TaskText=Driver,host=PC ProcessName="WUDFHost.exe",UserName="NT AUTHORITY\\LOCAL SERVICE",Data_dwMaxCCIDMessageLength="271",Data_bPINSupport="0x0",Data_bMaxCCIDBusySlots="1",EventRecordID=1914688i,UserID="S-1-5-19",Version=0i,Data_bClassGetEnvelope="0x0",Data_wLcdLayout="0x0",Data_bClassGetResponse="0x0",TimeCreated="2020-08-21T08:43:26.7481077Z",Message="The Smartcard reader reported the following class descriptor (part 2)." 1597999410000000000 + +win_eventlog,Channel=Security,Computer=PC,EventID=4798,Keywords=Audit\ Success,Level=0,LevelText=Information,Opcode=0,OpcodeText=Info,Source=Microsoft-Windows-Security-Auditing,Task=13824,TaskText=User\ Account\ Management,host=PC Data_TargetDomainName="PC",Data_SubjectUserName="User",Data_CallerProcessId="0x3d5c",Data_SubjectLogonId="0x46d14f8d",Version=0i,EventRecordID=223157i,Message="A user's local group membership was enumerated.",Data_TargetUserName="User",Data_TargetSid="S-1-5-21-.-.-.-1001",Data_SubjectUserSid="S-1-5-21-.-.-.-1001",Data_CallerProcessName="C:\\Windows\\explorer.exe",ActivityID="{0d4cc11d-7099-0002-4dc1-4c0d9970d601}",UserID="",Data_SubjectDomainName="PC",TimeCreated="2020-08-21T08:43:27.3036771Z",ProcessName="lsass.exe" 1597999410000000000 + +win_eventlog,Channel=Microsoft-Windows-Dhcp-Client/Admin,Computer=PC,EventID=1002,Keywords=0x4000000000000001,Level=2,LevelText=Error,Opcode=76,OpcodeText=IpLeaseDenied,Source=Microsoft-Windows-Dhcp-Client,Task=3,TaskText=Address\ Configuration\ State\ Event,host=PC Version=0i,Message="The IP address lease 10.20.30.40 for the Network Card with network address 0xaabbccddeeff has been denied by the DHCP server 10.20.30.1 (The DHCP Server sent a DHCPNACK 
message).",UserID="S-1-5-19",Data_HWLength="6",Data_HWAddress="545595B7EA01",TimeCreated="2020-08-21T08:43:42.8265853Z",EventRecordID=34i,ProcessName="svchost.exe",UserName="NT AUTHORITY\\LOCAL SERVICE" 1597999430000000000 + +win_eventlog,Channel=System,Computer=PC,EventID=10016,Keywords=Classic,Level=3,LevelText=Warning,Opcode=0,OpcodeText=Info,Source=Microsoft-Windows-DistributedCOM,Task=0,host=PC Data_param3="Активация",Data_param6="PC",Data_param8="S-1-5-21-2007059868-50816014-3139024325-1001",Version=0i,UserName="PC\\User",Data_param1="по умолчанию для компьютера",Data_param2="Локально",Data_param7="User",Data_param9="LocalHost (с использованием LRPC)",Data_param10="Microsoft.Windows.ShellExperienceHost_10.0.19041.423_neutral_neutral_cw5n1h2txyewy",ActivityID="{839cac9e-73a1-4559-a847-62f3a5e73e44}",ProcessName="svchost.exe",Message="The по умолчанию для компьютера permission settings do not grant Локально Активация permission for the COM Server application with CLSID ",Data_param5="{316CDED5-E4AE-4B15-9113-7055D84DCC97}",Data_param11="S-1-15-2-.-.-.-.-.-.-2861478708",TimeCreated="2020-08-21T08:43:45.5233759Z",EventRecordID=1914689i,UserID="S-1-5-21-.-.-.-1001",Data_param4="{C2F03A33-21F5-47FA-B4BB-156362A2F239}" 1597999430000000000 + +``` diff --git a/src/writeData/constants/contentTelegrafPlugins.ts b/src/writeData/constants/contentTelegrafPlugins.ts index b669bde610..3c72b9b6e0 100644 --- a/src/writeData/constants/contentTelegrafPlugins.ts +++ b/src/writeData/constants/contentTelegrafPlugins.ts @@ -65,6 +65,7 @@ import httpMarkdown from 'src/writeData/components/telegrafPlugins/http.md' import icinga2Markdown from 'src/writeData/components/telegrafPlugins/icinga2.md' import infinibandMarkdown from 'src/writeData/components/telegrafPlugins/infiniband.md' import influxdb_listenerMarkdown from 'src/writeData/components/telegrafPlugins/influxdb_listener.md' +import influxdb_v2_listenerMarkdown from 
'src/writeData/components/telegrafPlugins/influxdb_v2_listener.md' import influxdbMarkdown from 'src/writeData/components/telegrafPlugins/influxdb.md' import internalMarkdown from 'src/writeData/components/telegrafPlugins/internal.md' import interruptsMarkdown from 'src/writeData/components/telegrafPlugins/interrupts.md' @@ -113,12 +114,14 @@ import nginx_plusMarkdown from 'src/writeData/components/telegrafPlugins/nginx_p import nginx_stsMarkdown from 'src/writeData/components/telegrafPlugins/nginx_sts.md' import nginx_upstream_checkMarkdown from 'src/writeData/components/telegrafPlugins/nginx_upstream_check.md' import nginx_vtsMarkdown from 'src/writeData/components/telegrafPlugins/nginx_vts.md' +import nsdMarkdown from 'src/writeData/components/telegrafPlugins/nsd.md' import nginxMarkdown from 'src/writeData/components/telegrafPlugins/nginx.md' import nsq_consumerMarkdown from 'src/writeData/components/telegrafPlugins/nsq_consumer.md' import nsqMarkdown from 'src/writeData/components/telegrafPlugins/nsq.md' import nstatMarkdown from 'src/writeData/components/telegrafPlugins/nstat.md' import ntpqMarkdown from 'src/writeData/components/telegrafPlugins/ntpq.md' import nvidia_smiMarkdown from 'src/writeData/components/telegrafPlugins/nvidia_smi.md' +import opcuaMarkdown from 'src/writeData/components/telegrafPlugins/opcua.md' import openldapMarkdown from 'src/writeData/components/telegrafPlugins/openldap.md' import openntpdMarkdown from 'src/writeData/components/telegrafPlugins/openntpd.md' import opensmtpdMarkdown from 'src/writeData/components/telegrafPlugins/opensmtpd.md' @@ -175,6 +178,7 @@ import uwsgiMarkdown from 'src/writeData/components/telegrafPlugins/uwsgi.md' import varnishMarkdown from 'src/writeData/components/telegrafPlugins/varnish.md' import vsphereMarkdown from 'src/writeData/components/telegrafPlugins/vsphere.md' import webhooksMarkdown from 'src/writeData/components/telegrafPlugins/webhooks.md' +import win_eventlogMarkdown from 
'src/writeData/components/telegrafPlugins/win_eventlog.md' import win_perf_countersMarkdown from 'src/writeData/components/telegrafPlugins/win_perf_counters.md' import win_servicesMarkdown from 'src/writeData/components/telegrafPlugins/win_services.md' import wireguardMarkdown from 'src/writeData/components/telegrafPlugins/wireguard.md' @@ -245,6 +249,7 @@ import httpLogo from 'src/writeData/graphics/http.svg' import icinga2Logo from 'src/writeData/graphics/icinga2.svg' import infinibandLogo from 'src/writeData/graphics/infiniband.svg' import influxdb_listenerLogo from 'src/writeData/graphics/influxdb_listener.svg' +import influxdb_v2_listenerLogo from 'src/writeData/graphics/influxdb_v2_listener.svg' import influxdbLogo from 'src/writeData/graphics/influxdb.svg' import internalLogo from 'src/writeData/graphics/internal.svg' import interruptsLogo from 'src/writeData/graphics/interrupts.svg' @@ -293,12 +298,14 @@ import nginx_plusLogo from 'src/writeData/graphics/nginx_plus.svg' import nginx_stsLogo from 'src/writeData/graphics/nginx_sts.svg' import nginx_upstream_checkLogo from 'src/writeData/graphics/nginx_upstream_check.svg' import nginx_vtsLogo from 'src/writeData/graphics/nginx_vts.svg' +import nsdLogo from 'src/writeData/graphics/nsd.svg' import nginxLogo from 'src/writeData/graphics/nginx.svg' import nsq_consumerLogo from 'src/writeData/graphics/nsq_consumer.svg' import nsqLogo from 'src/writeData/graphics/nsq.svg' import nstatLogo from 'src/writeData/graphics/nstat.svg' import ntpqLogo from 'src/writeData/graphics/ntpq.svg' import nvidia_smiLogo from 'src/writeData/graphics/nvidia_smi.svg' +import opcuaLogo from 'src/writeData/graphics/opcua.svg' import openldapLogo from 'src/writeData/graphics/openldap.svg' import openntpdLogo from 'src/writeData/graphics/openntpd.svg' import opensmtpdLogo from 'src/writeData/graphics/opensmtpd.svg' @@ -355,6 +362,7 @@ import uwsgiLogo from 'src/writeData/graphics/uwsgi.svg' import varnishLogo from 
'src/writeData/graphics/varnish.svg' import vsphereLogo from 'src/writeData/graphics/vsphere.svg' import webhooksLogo from 'src/writeData/graphics/webhooks.svg' +import win_eventlogLogo from 'src/writeData/graphics/win_eventlog.svg' import win_perf_countersLogo from 'src/writeData/graphics/win_perf_counters.svg' import win_servicesLogo from 'src/writeData/graphics/win_services.svg' import wireguardLogo from 'src/writeData/graphics/wireguard.svg' @@ -792,6 +800,13 @@ export const WRITE_DATA_TELEGRAF_PLUGINS: WriteDataItem[] = [ markdown: influxdb_listenerMarkdown, image: influxdb_listenerLogo, }, + { + id: 'influxdb_v2_listener', + name: 'InfluxDB V2 Listener', + url: `${TELEGRAF_PLUGINS}/influxdb_v2_listener`, + markdown: influxdb_v2_listenerMarkdown, + image: influxdb_v2_listenerLogo, + }, { id: 'internal', name: 'Telegraf Internal', @@ -1128,6 +1143,13 @@ export const WRITE_DATA_TELEGRAF_PLUGINS: WriteDataItem[] = [ markdown: nginx_vtsMarkdown, image: nginx_vtsLogo, }, + { + id: 'nsd', + name: 'NSD', + url: `${TELEGRAF_PLUGINS}/nsd`, + markdown: nsdMarkdown, + image: nsdLogo, + }, { id: 'nsq', name: 'NSQ', @@ -1163,6 +1185,13 @@ export const WRITE_DATA_TELEGRAF_PLUGINS: WriteDataItem[] = [ markdown: nvidia_smiMarkdown, image: nvidia_smiLogo, }, + { + id: 'opcua', + name: 'OPC UA Client', + url: `${TELEGRAF_PLUGINS}/opcua`, + markdown: opcuaMarkdown, + image: opcuaLogo, + }, { id: 'openldap', name: 'OpenLDAP', @@ -1555,6 +1584,13 @@ export const WRITE_DATA_TELEGRAF_PLUGINS: WriteDataItem[] = [ markdown: webhooksMarkdown, image: webhooksLogo, }, + { + id: 'win_eventlog', + name: 'Windows Eventlog', + url: `${TELEGRAF_PLUGINS}/win_eventlog`, + markdown: win_eventlogMarkdown, + image: win_eventlogLogo, + }, { id: 'win_perf_counters', name: 'Windows Performance Counters', diff --git a/src/writeData/graphics/influxdb_v2_listener.svg b/src/writeData/graphics/influxdb_v2_listener.svg new file mode 100644 index 0000000000..b1986ad9c2 --- /dev/null +++ 
b/src/writeData/graphics/influxdb_v2_listener.svg @@ -0,0 +1,21 @@ + + + + + + diff --git a/src/writeData/graphics/nsd.svg b/src/writeData/graphics/nsd.svg new file mode 100644 index 0000000000..74f0def779 --- /dev/null +++ b/src/writeData/graphics/nsd.svg @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/writeData/graphics/opcua.svg b/src/writeData/graphics/opcua.svg new file mode 100644 index 0000000000..5350e09ece --- /dev/null +++ b/src/writeData/graphics/opcua.svg @@ -0,0 +1,353 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + opcAsset 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/writeData/graphics/win_eventlog.svg b/src/writeData/graphics/win_eventlog.svg new file mode 100644 index 0000000000..0d47e89ab9 --- /dev/null +++ b/src/writeData/graphics/win_eventlog.svg @@ -0,0 +1 @@ + \ No newline at end of file From ba3711a6313939faaa1df4df8904d87b57f6775e Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Tue, 29 Sep 2020 12:57:18 -0700 Subject: [PATCH 2/5] fix(arduino): revert typo fix --- src/writeData/components/clientLibraries/Arduino.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/writeData/components/clientLibraries/Arduino.md b/src/writeData/components/clientLibraries/Arduino.md index 1f4d000029..26c5e8ad54 100644 --- a/src/writeData/components/clientLibraries/Arduino.md +++ b/src/writeData/components/clientLibraries/Arduino.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino) +For more detailed and up to date information check out the [GitHub 
Repository](https://github.com/tobiasschuerg/InfluxDB-Client-for-Arduino) ##### Install Library From becfffd8525140138a81e13ca7922b4d847f031e Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Tue, 29 Sep 2020 13:07:59 -0700 Subject: [PATCH 3/5] chore(changelog): added new sources --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cfc7563ee..dac1fa5477 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,3 +3,4 @@ ### Features 1. [106](https://github.com/influxdata/ui/pull/106): Community Templates is now enabled for everyone. Go to settings -> templates. +1. [126](https://github.com/influxdata/ui/pull/126): Added InfluxDB v2 Listener, NSD, OPC-UA, and Windows Event Log to the sources page From 35fc4740e956043b0a60ce5408b03031183bbae1 Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Thu, 1 Oct 2020 10:28:30 -0700 Subject: [PATCH 4/5] fix(winEventLog): fixing typo in win_eventlog readme --- src/writeData/components/telegrafPlugins/win_eventlog.md | 1 + 1 file changed, 1 insertion(+) diff --git a/src/writeData/components/telegrafPlugins/win_eventlog.md b/src/writeData/components/telegrafPlugins/win_eventlog.md index 5551d22b41..9f48fd9ac3 100644 --- a/src/writeData/components/telegrafPlugins/win_eventlog.md +++ b/src/writeData/components/telegrafPlugins/win_eventlog.md @@ -9,6 +9,7 @@ Telegraf should have Administrator permissions to subscribe for some of the Wind ### Configuration ```toml +[[inputs.win_eventlog]] ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels ## (System log, for example) From 5ea03cf9f3ccbf93edf5ccbf91fd3124d0d84e8c Mon Sep 17 00:00:00 2001 From: Russ Savage Date: Wed, 14 Oct 2020 11:26:49 -0700 Subject: [PATCH 5/5] fix(writeData): adding intel_rdt and ras --- .../components/clientLibraries/CSharp.md | 2 +- .../components/clientLibraries/Go.md | 2 +- .../components/clientLibraries/Java.md | 2 +- .../components/clientLibraries/Kotlin.md | 2 +- 
.../components/clientLibraries/Node.md | 2 +- .../components/clientLibraries/PHP.md | 2 +- .../components/clientLibraries/Python.md | 2 +- .../components/clientLibraries/Ruby.md | 2 +- .../components/clientLibraries/Scala.md | 2 +- .../components/telegrafPlugins/cloudwatch.md | 9 +- .../components/telegrafPlugins/consul.md | 26 +- .../components/telegrafPlugins/exec.md | 2 +- .../telegrafPlugins/http_response.md | 8 +- .../telegrafPlugins/influxdb_v2_listener.md | 5 +- .../components/telegrafPlugins/intel_rdt.md | 108 ++ .../components/telegrafPlugins/proxmox.md | 2 + .../components/telegrafPlugins/ras.md | 58 + .../components/telegrafPlugins/redis.md | 5 + .../components/telegrafPlugins/smart.md | 22 +- .../components/telegrafPlugins/snmp.md | 3 + .../components/telegrafPlugins/sqlserver.md | 4 +- .../telegrafPlugins/win_eventlog.md | 2 + .../constants/contentTelegrafPlugins.ts | 18 + src/writeData/graphics/intel_rdt.svg | 31 + src/writeData/graphics/ras.svg | 1532 +++++++++++++++++ 25 files changed, 1821 insertions(+), 32 deletions(-) create mode 100644 src/writeData/components/telegrafPlugins/intel_rdt.md create mode 100644 src/writeData/components/telegrafPlugins/ras.md create mode 100644 src/writeData/graphics/intel_rdt.svg create mode 100644 src/writeData/graphics/ras.svg diff --git a/src/writeData/components/clientLibraries/CSharp.md b/src/writeData/components/clientLibraries/CSharp.md index 6e9e894e70..b61d03e35b 100644 --- a/src/writeData/components/clientLibraries/CSharp.md +++ b/src/writeData/components/clientLibraries/CSharp.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-csharp) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-csharp) ##### Install Package diff --git a/src/writeData/components/clientLibraries/Go.md b/src/writeData/components/clientLibraries/Go.md index 
fbf1f87d1f..772474f257 100644 --- a/src/writeData/components/clientLibraries/Go.md +++ b/src/writeData/components/clientLibraries/Go.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-go) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-go) ##### Initialize the Client diff --git a/src/writeData/components/clientLibraries/Java.md b/src/writeData/components/clientLibraries/Java.md index 0eb0ba0c74..6ddff8802e 100644 --- a/src/writeData/components/clientLibraries/Java.md +++ b/src/writeData/components/clientLibraries/Java.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-java) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-java) ##### Add Dependency diff --git a/src/writeData/components/clientLibraries/Kotlin.md b/src/writeData/components/clientLibraries/Kotlin.md index 84b8ecf744..9c0ff2c190 100644 --- a/src/writeData/components/clientLibraries/Kotlin.md +++ b/src/writeData/components/clientLibraries/Kotlin.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-java/tree/master/client-kotlin) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-java/tree/master/client-kotlin) ##### Add Dependency diff --git a/src/writeData/components/clientLibraries/Node.md b/src/writeData/components/clientLibraries/Node.md index 955a99d969..ca115916fd 100644 --- a/src/writeData/components/clientLibraries/Node.md +++ b/src/writeData/components/clientLibraries/Node.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub 
Respository](https://github.com/influxdata/influxdb-client-js) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-js) ##### Install via NPM diff --git a/src/writeData/components/clientLibraries/PHP.md b/src/writeData/components/clientLibraries/PHP.md index 8db4f0d80f..e0ad732e98 100644 --- a/src/writeData/components/clientLibraries/PHP.md +++ b/src/writeData/components/clientLibraries/PHP.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-php) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-php) ##### Install via Composer diff --git a/src/writeData/components/clientLibraries/Python.md b/src/writeData/components/clientLibraries/Python.md index 9863715faa..0291848ee2 100644 --- a/src/writeData/components/clientLibraries/Python.md +++ b/src/writeData/components/clientLibraries/Python.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-python) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-python) ##### Install Package diff --git a/src/writeData/components/clientLibraries/Ruby.md b/src/writeData/components/clientLibraries/Ruby.md index fc0f78293a..3bce15faae 100644 --- a/src/writeData/components/clientLibraries/Ruby.md +++ b/src/writeData/components/clientLibraries/Ruby.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-ruby) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-ruby) ##### Install the Gem diff --git a/src/writeData/components/clientLibraries/Scala.md 
b/src/writeData/components/clientLibraries/Scala.md index 1a7b3a3125..4464efd3c8 100644 --- a/src/writeData/components/clientLibraries/Scala.md +++ b/src/writeData/components/clientLibraries/Scala.md @@ -1,4 +1,4 @@ -For more detailed and up to date information check out the [GitHub Respository](https://github.com/influxdata/influxdb-client-java/tree/master/client-scala) +For more detailed and up to date information check out the [GitHub Repository](https://github.com/influxdata/influxdb-client-java/tree/master/client-scala) ##### Add Dependency diff --git a/src/writeData/components/telegrafPlugins/cloudwatch.md b/src/writeData/components/telegrafPlugins/cloudwatch.md index 674dd0ac43..bc7b9b50c5 100644 --- a/src/writeData/components/telegrafPlugins/cloudwatch.md +++ b/src/writeData/components/telegrafPlugins/cloudwatch.md @@ -58,6 +58,13 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## gaps or overlap in pulled data interval = "5m" + ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. + ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. + ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. + ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. + ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html + #recently_active = "PT3H" + ## Configure the TTL for the internal cache of metrics. # cache_ttl = "1h" @@ -150,7 +157,7 @@ To maximize efficiency and savings, consider making fewer requests by increasing ### Measurements & Fields: -Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic +Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic. 
Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) - cloudwatch_{namespace} diff --git a/src/writeData/components/telegrafPlugins/consul.md b/src/writeData/components/telegrafPlugins/consul.md index 8e1ecc094c..71d7d26a8f 100644 --- a/src/writeData/components/telegrafPlugins/consul.md +++ b/src/writeData/components/telegrafPlugins/consul.md @@ -17,6 +17,14 @@ report those stats already using StatsD protocol if needed. ## URI scheme for the Consul server, one of "http", "https" # scheme = "http" + ## Metric version controls the mapping from Consul metrics into + ## Telegraf metrics. Version 2 moved all fields with string values + ## to tags. + ## + ## example: metric_version = 1; deprecated in 1.16 + ## metric_version = 2; recommended version + # metric_version = 1 + ## ACL token used in every request # token = "" @@ -41,7 +49,7 @@ report those stats already using StatsD protocol if needed. ``` ### Metrics: - +##### metric_version = 1: - consul_health_checks - tags: - node (node that check/service is registered on) @@ -55,9 +63,23 @@ report those stats already using StatsD protocol if needed. - critical (integer) - warning (integer) +##### metric_version = 2: +- consul_health_checks + - tags: + - node (node that check/service is registered on) + - service_name + - check_id + - check_name + - service_id + - status + - fields: + - passing (integer) + - critical (integer) + - warning (integer) + `passing`, `critical`, and `warning` are integer representations of the health check state. A value of `1` represents that the status was the state of the -the health check at this sample. +the health check at this sample. `status` is string representation of the same state. 
## Example output diff --git a/src/writeData/components/telegrafPlugins/exec.md b/src/writeData/components/telegrafPlugins/exec.md index 8ed0b51110..4e3d724542 100644 --- a/src/writeData/components/telegrafPlugins/exec.md +++ b/src/writeData/components/telegrafPlugins/exec.md @@ -1,6 +1,6 @@ # Exec Input Plugin -The `exec` plugin executes the `commands` on every interval and parses metrics from +The `exec` plugin executes all the `commands` in parallel on every interval and parses metrics from their output in any one of the accepted [Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). This plugin can be used to poll for custom metrics from any source. diff --git a/src/writeData/components/telegrafPlugins/http_response.md b/src/writeData/components/telegrafPlugins/http_response.md index 889b6f4f36..67d0dc067f 100644 --- a/src/writeData/components/telegrafPlugins/http_response.md +++ b/src/writeData/components/telegrafPlugins/http_response.md @@ -7,9 +7,7 @@ This input plugin checks HTTP/HTTPS connections. ```toml # HTTP/HTTPS request given an address a method and a timeout [[inputs.http_response]] - ## Deprecated in 1.12, use 'urls' - ## Server address (default http://localhost) - # address = "http://localhost" + ## address is Deprecated in 1.12, use 'urls' ## List of urls to query. # urls = ["http://localhost"] @@ -39,8 +37,8 @@ This input plugin checks HTTP/HTTPS connections. # {'fake':'data'} # ''' - ## Optional name of the field that will contain the body of the response. - ## By default it is set to an empty String indicating that the body's content won't be added + ## Optional name of the field that will contain the body of the response. + ## By default it is set to an empty String indicating that the body's content won't be added # response_body_field = '' ## Maximum allowed HTTP response body size in bytes. 
diff --git a/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md b/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md index cef11e9ae3..4258e021d8 100644 --- a/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md +++ b/src/writeData/components/telegrafPlugins/influxdb_v2_listener.md @@ -9,12 +9,15 @@ The `/api/v2/write` endpoint supports the `precision` query parameter and can be to one of `ns`, `us`, `ms`, `s`. All other parameters are ignored and defer to the output plugins configuration. +Telegraf minimum version: Telegraf 1.16.0 + ### Configuration: ```toml [[inputs.influxdb_v2_listener]] ## Address and port to host InfluxDB listener on - service_address = ":9999" + ## (Double check the port. Could be 9999 if using OSS Beta) + service_address = ":8086" ## Maximum allowed HTTP request body size in bytes. ## 0 means to use the default of 32MiB. diff --git a/src/writeData/components/telegrafPlugins/intel_rdt.md b/src/writeData/components/telegrafPlugins/intel_rdt.md new file mode 100644 index 0000000000..1a6e55f6a7 --- /dev/null +++ b/src/writeData/components/telegrafPlugins/intel_rdt.md @@ -0,0 +1,108 @@ +# Intel RDT Input Plugin +The intel_rdt plugin collects information provided by monitoring features of +Intel Resource Director Technology (Intel(R) RDT) like Cache Monitoring Technology (CMT), +Memory Bandwidth Monitoring (MBM), Cache Allocation Technology (CAT) and Code +and Data Prioritization (CDP) Technology provide the hardware framework to monitor +and control the utilization of shared resources, like last level cache, memory bandwidth. +These Technologies comprise Intel’s Resource Director Technology (RDT). +As multithreaded and multicore platform architectures emerge, +running workloads in single-threaded, multithreaded, or complex virtual machine environment, +the last level cache and memory bandwidth are key resources to manage. 
Intel introduces CMT, +MBM, CAT and CDP to manage these various workloads across shared resources. + +To gather Intel RDT metrics, the plugin uses the _pqos_ CLI tool, which is part of the [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). +Before using this plugin, make sure _pqos_ is properly installed and configured, because the plugin +runs _pqos_ in `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and above. +Be aware that the pqos tool needs root privileges to work properly. + +Metrics will be constantly reported from the following `pqos` commands within the given interval: + +#### In case of cores monitoring: +``` +pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-core=all:[CORES]\;mbt:[CORES] +``` +where `CORES` is a group of cores provided in the config. Users can provide many groups. + +#### In case of process monitoring: +``` +pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-pid=all:[PIDS]\;mbt:[PIDS] +``` +where `PIDS` is a group of process IDs whose names match a process name provided in the config. +Users can provide many process names, each of which creates a separate process group. + +In both cases `INTERVAL` is equal to the sampling_interval from the config. + +Because PID associations within the system can change at any moment, the Intel RDT plugin provides +functionality to check on every interval whether the desired processes have changed their PID associations. +If a change is detected, the plugin restarts the _pqos_ tool with new arguments. If a process name provided by the user +does not match any available process, it is omitted and the plugin will continually +check for process availability.
+ +### Useful links +Pqos installation process: https://github.com/intel/intel-cmt-cat/blob/master/INSTALL +Enabling OS interface: https://github.com/intel/intel-cmt-cat/wiki, https://github.com/intel/intel-cmt-cat/wiki/resctrl +More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html + +### Configuration +```toml +# Read Intel RDT metrics +[[inputs.IntelRDT]] + ## Optionally set sampling interval to Nx100ms. + ## This value is propagated to pqos tool. Interval format is defined by pqos itself. + ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. + # sampling_interval = "10" + + ## Optionally specify the path to pqos executable. + ## If not provided, auto discovery will be performed. + # pqos_path = "/usr/local/bin/pqos" + + ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. + ## If not provided, default value is false. + # shortened_metrics = false + + ## Specify the list of groups of CPU core(s) to be provided as pqos input. + ## Mandatory if processes aren't set and forbidden if processes are specified. + ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] + # cores = ["0-3"] + + ## Specify the list of processes for which Metrics will be collected. + ## Mandatory if cores aren't set and forbidden if cores are specified. + ## e.g. 
["qemu", "pmd"] + # processes = ["process"] +``` + +### Exposed metrics +| Name | Full name | Description | +|---------------|-----------------------------------------------|-------------| +| MBL | Memory Bandwidth on Local NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the local NUMA memory channel | +| MBR | Memory Bandwidth on Remote NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the remote NUMA memory channel | +| MBT | Total Memory Bandwidth | Total memory bandwidth utilized by a CPU core/process on local and remote NUMA memory channels | +| LLC | L3 Cache Occupancy | Total Last Level Cache occupancy by a CPU core/process | +| *LLC_Misses | L3 Cache Misses | Total Last Level Cache misses by a CPU core/process | +| *IPC | Instructions Per Cycle | Total instructions per cycle executed by a CPU core/process | + +*optional + +### Troubleshooting +Pointing to non-existing core will lead to throwing an error by _pqos_ and plugin will not work properly. +Be sure to check if provided core number exists within desired system. + +Be aware reading Intel RDT metrics by _pqos_ cannot be done simultaneously on the same resource. +So be sure to not use any other _pqos_ instance which is monitoring the same cores or PIDs within working system. +Also there is no possibility to monitor same cores or PIDs on different groups. + +Pids association for the given process could be manually checked by `pidof` command. E.g: +``` +pidof PROCESS +``` +where `PROCESS` is process name. 
+ +### Example Output +``` +> rdt_metric,cores=12\,19,host=r2-compute-20,name=IPC,process=top value=0 1598962030000000000 +> rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC_Misses,process=top value=0 1598962030000000000 +> rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC,process=top value=0 1598962030000000000 +> rdt_metric,cores=12\,19,host=r2-compute-20,name=MBL,process=top value=0 1598962030000000000 +> rdt_metric,cores=12\,19,host=r2-compute-20,name=MBR,process=top value=0 1598962030000000000 +> rdt_metric,cores=12\,19,host=r2-compute-20,name=MBT,process=top value=0 1598962030000000000 +``` \ No newline at end of file diff --git a/src/writeData/components/telegrafPlugins/proxmox.md b/src/writeData/components/telegrafPlugins/proxmox.md index 767756178b..ac81633a3f 100644 --- a/src/writeData/components/telegrafPlugins/proxmox.md +++ b/src/writeData/components/telegrafPlugins/proxmox.md @@ -2,6 +2,8 @@ The proxmox plugin gathers metrics about containers and VMs using the Proxmox API. +Telegraf minimum version: Telegraf 1.16.0 + ### Configuration: ```toml diff --git a/src/writeData/components/telegrafPlugins/ras.md b/src/writeData/components/telegrafPlugins/ras.md new file mode 100644 index 0000000000..641d1f4884 --- /dev/null +++ b/src/writeData/components/telegrafPlugins/ras.md @@ -0,0 +1,58 @@ +# RAS Input Plugin + +The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). + +### Configuration + +```toml +[[inputs.ras]] + ## Optional path to RASDaemon sqlite3 database. + ## Default: /var/lib/rasdaemon/ras-mc_event.db + # db_path = "" +``` + +In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case of problems with SQLite3 database please verify this is still a default option. 
+ +### Metrics + +- ras + - tags: + - socket_id + - fields: + - memory_read_corrected_errors + - memory_read_uncorrectable_errors + - memory_write_corrected_errors + - memory_write_uncorrectable_errors + - cache_l0_l1_errors + - tlb_instruction_errors + - cache_l2_errors + - upi_errors + - processor_base_errors + - processor_bus_errors + - internal_timer_errors + - smm_handler_code_access_violation_errors + - internal_parity_errors + - frc_errors + - external_mce_errors + - microcode_rom_parity_errors + - unclassified_mce_errors + +Please note that `processor_base_errors` is aggregate counter measuring the following MCE events: +- internal_timer_errors +- smm_handler_code_access_violation_errors +- internal_parity_errors +- frc_errors +- external_mce_errors +- microcode_rom_parity_errors +- unclassified_mce_errors + +### Permissions + +This plugin requires access to SQLite3 database from `RASDaemon`. Please make sure that user has required permissions to this database. + +### Example Output + +``` +ras,host=ubuntu,socket_id=0 external_mce_base_errors=1i,frc_errors=1i,instruction_tlb_errors=5i,internal_parity_errors=1i,internal_timer_errors=1i,l0_and_l1_cache_errors=7i,memory_read_corrected_errors=25i,memory_read_uncorrectable_errors=0i,memory_write_corrected_errors=5i,memory_write_uncorrectable_errors=0i,microcode_rom_parity_errors=1i,processor_base_errors=7i,processor_bus_errors=1i,smm_handler_code_access_violation_errors=1i,unclassified_mce_base_errors=1i 1598867393000000000 +ras,host=ubuntu level_2_cache_errors=0i,upi_errors=0i 1598867393000000000 +``` diff --git a/src/writeData/components/telegrafPlugins/redis.md b/src/writeData/components/telegrafPlugins/redis.md index f62b9db6e3..c8f343b262 100644 --- a/src/writeData/components/telegrafPlugins/redis.md +++ b/src/writeData/components/telegrafPlugins/redis.md @@ -14,6 +14,11 @@ ## If no servers are specified, then localhost is used as the host. 
## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## Optional. Specify redis commands to retrieve values + # [[inputs.redis.commands]] + # command = ["get", "sample-key"] + # field = "sample-key-value" + # type = "string" ## specify server password # password = "s#cr@t%" diff --git a/src/writeData/components/telegrafPlugins/smart.md b/src/writeData/components/telegrafPlugins/smart.md index d26ebc9678..dec58e3f9a 100644 --- a/src/writeData/components/telegrafPlugins/smart.md +++ b/src/writeData/components/telegrafPlugins/smart.md @@ -7,13 +7,13 @@ SMART information is separated between different measurements: `smart_device` is If no devices are specified, the plugin will scan for SMART devices via the following command: -```bash +``` smartctl --scan ``` Metrics will be reported from the following `smartctl` command: -```bash +``` smartctl --info --attributes --health -n --format=brief ``` @@ -23,7 +23,7 @@ Also, NVMe capabilities were introduced in version 6.5. To enable SMART on a storage device run: -```bash +``` smartctl -s on ``` ## NVMe vendor specific attributes @@ -35,29 +35,29 @@ In case of `nvme-cli` absence NVMe vendor specific metrics will not be obtained. Vendor specific SMART metrics for NVMe disks may be reported from the following `nvme` command: -```bash +``` nvme smart-log-add ``` Note that vendor plugins for `nvme-cli` could require different naming convention and report format. To see installed plugin extensions, depended on the nvme-cli version, look at the bottom of: -```bash +``` nvme help ``` To gather disk vendor id (vid) `id-ctrl` could be used: -```bash +``` nvme id-ctrl ``` Association between a vid and company can be found there: https://pcisig.com/membership/member-companies. 
Devices affiliation to being NVMe or non NVMe will be determined thanks to: -```bash +``` smartctl --scan ``` and: -```bash +``` smartctl --scan -d nvme ``` @@ -203,16 +203,16 @@ If this plugin is not working as expected for your SMART enabled device, please run these commands and include the output in a bug report: For non NVMe devices (from smartctl version >= 7.0 this will also return NVMe devices by default): -```bash +``` smartctl --scan ``` For NVMe devices: -```bash +``` smartctl --scan -d nvme ``` Run the following command replacing your configuration setting for NOCHECK and the DEVICE (name of the device could be taken from the previous command): -```bash +``` smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE ``` If you try to gather vendor specific metrics, please provide this commad diff --git a/src/writeData/components/telegrafPlugins/snmp.md b/src/writeData/components/telegrafPlugins/snmp.md index 0d2eb52ab4..a0c9155db5 100644 --- a/src/writeData/components/telegrafPlugins/snmp.md +++ b/src/writeData/components/telegrafPlugins/snmp.md @@ -35,6 +35,9 @@ information. ## SNMP community string. # community = "public" + ## Agent host tag + # agent_host_tag = "agent_host" + ## Number of retries to attempt. 
# retries = 3 diff --git a/src/writeData/components/telegrafPlugins/sqlserver.md b/src/writeData/components/telegrafPlugins/sqlserver.md index 7f7887769d..27c6da1cd7 100644 --- a/src/writeData/components/telegrafPlugins/sqlserver.md +++ b/src/writeData/components/telegrafPlugins/sqlserver.md @@ -78,7 +78,7 @@ GO ## - AzureSQLDBResourceGovernance ## - AzureSQLDBDatabaseIO ## - AzureSQLDBServerProperties - ## - AzureSQLDBSQLOsWaitstats + ## - AzureSQLDBOsWaitstats ## - AzureSQLDBMemoryClerks ## - AzureSQLDBPerformanceCounters ## - AzureSQLDBRequests @@ -92,7 +92,7 @@ GO ## - AzureSQLMIOsWaitstats ## - AzureSQLMIMemoryClerks ## - AzureSQLMIPerformanceCounters - ## - AzureSQLMIDBRequests + ## - AzureSQLMIRequests ## - AzureSQLMISchedulers ## database_type = SQLServer by default collects the following queries diff --git a/src/writeData/components/telegrafPlugins/win_eventlog.md b/src/writeData/components/telegrafPlugins/win_eventlog.md index 9f48fd9ac3..e3c48656f7 100644 --- a/src/writeData/components/telegrafPlugins/win_eventlog.md +++ b/src/writeData/components/telegrafPlugins/win_eventlog.md @@ -6,6 +6,8 @@ Supports Windows Vista and higher. Telegraf should have Administrator permissions to subscribe for some of the Windows Events Channels, like System Log. 
+Telegraf minimum version: Telegraf 1.16.0 + ### Configuration ```toml diff --git a/src/writeData/constants/contentTelegrafPlugins.ts b/src/writeData/constants/contentTelegrafPlugins.ts index 3c72b9b6e0..7b6fb718e6 100644 --- a/src/writeData/constants/contentTelegrafPlugins.ts +++ b/src/writeData/constants/contentTelegrafPlugins.ts @@ -66,6 +66,7 @@ import icinga2Markdown from 'src/writeData/components/telegrafPlugins/icinga2.md import infinibandMarkdown from 'src/writeData/components/telegrafPlugins/infiniband.md' import influxdb_listenerMarkdown from 'src/writeData/components/telegrafPlugins/influxdb_listener.md' import influxdb_v2_listenerMarkdown from 'src/writeData/components/telegrafPlugins/influxdb_v2_listener.md' +import intel_rdtMarkdown from 'src/writeData/components/telegrafPlugins/intel_rdt.md' import influxdbMarkdown from 'src/writeData/components/telegrafPlugins/influxdb.md' import internalMarkdown from 'src/writeData/components/telegrafPlugins/internal.md' import interruptsMarkdown from 'src/writeData/components/telegrafPlugins/interrupts.md' @@ -143,6 +144,7 @@ import proxmoxMarkdown from 'src/writeData/components/telegrafPlugins/proxmox.md import puppetagentMarkdown from 'src/writeData/components/telegrafPlugins/puppetagent.md' import rabbitmqMarkdown from 'src/writeData/components/telegrafPlugins/rabbitmq.md' import raindropsMarkdown from 'src/writeData/components/telegrafPlugins/raindrops.md' +import rasMarkdown from 'src/writeData/components/telegrafPlugins/ras.md' import redfishMarkdown from 'src/writeData/components/telegrafPlugins/redfish.md' import redisMarkdown from 'src/writeData/components/telegrafPlugins/redis.md' import rethinkdbMarkdown from 'src/writeData/components/telegrafPlugins/rethinkdb.md' @@ -250,6 +252,7 @@ import icinga2Logo from 'src/writeData/graphics/icinga2.svg' import infinibandLogo from 'src/writeData/graphics/infiniband.svg' import influxdb_listenerLogo from 'src/writeData/graphics/influxdb_listener.svg' import 
influxdb_v2_listenerLogo from 'src/writeData/graphics/influxdb_v2_listener.svg' +import intel_rdtLogo from 'src/writeData/graphics/intel_rdt.svg' import influxdbLogo from 'src/writeData/graphics/influxdb.svg' import internalLogo from 'src/writeData/graphics/internal.svg' import interruptsLogo from 'src/writeData/graphics/interrupts.svg' @@ -327,6 +330,7 @@ import proxmoxLogo from 'src/writeData/graphics/proxmox.svg' import puppetagentLogo from 'src/writeData/graphics/puppetagent.svg' import rabbitmqLogo from 'src/writeData/graphics/rabbitmq.svg' import raindropsLogo from 'src/writeData/graphics/raindrops.svg' +import rasLogo from 'src/writeData/graphics/ras.svg' import redfishLogo from 'src/writeData/graphics/redfish.svg' import redisLogo from 'src/writeData/graphics/redis.svg' import rethinkdbLogo from 'src/writeData/graphics/rethinkdb.svg' @@ -807,6 +811,13 @@ export const WRITE_DATA_TELEGRAF_PLUGINS: WriteDataItem[] = [ markdown: influxdb_v2_listenerMarkdown, image: influxdb_v2_listenerLogo, }, + { + id: 'intel_rdt', + name: 'Intel RDT', + url: `${TELEGRAF_PLUGINS}/intel_rdt`, + markdown: intel_rdtMarkdown, + image: intel_rdtLogo, + }, { id: 'internal', name: 'Telegraf Internal', @@ -1339,6 +1350,13 @@ export const WRITE_DATA_TELEGRAF_PLUGINS: WriteDataItem[] = [ markdown: raindropsMarkdown, image: raindropsLogo, }, + { + id: 'ras', + name: 'RAS Daemon', + url: `${TELEGRAF_PLUGINS}/ras`, + markdown: rasMarkdown, + image: rasLogo, + }, { id: 'redfish', name: 'Redfish', diff --git a/src/writeData/graphics/intel_rdt.svg b/src/writeData/graphics/intel_rdt.svg new file mode 100644 index 0000000000..024ac9b2e5 --- /dev/null +++ b/src/writeData/graphics/intel_rdt.svg @@ -0,0 +1,31 @@ + + + + + + + diff --git a/src/writeData/graphics/ras.svg b/src/writeData/graphics/ras.svg new file mode 100644 index 0000000000..c0a92e0c0f --- /dev/null +++ b/src/writeData/graphics/ras.svg @@ -0,0 +1,1532 @@ + + + + Tux + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + Tux + 20 June 2012 + + + Garrett LeSage + + + + + + Larry Ewing, the creator of the original Tux graphic + + + + + tux + Linux + penguin + logo + + + + + Larry Ewing, Garrett LeSage + + + https://github.com/garrett/Tux + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +