From 056f7f8d004dfb5fa857f56ddc69dcd90986f825 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Thu, 11 Aug 2016 11:09:40 -0400 Subject: [PATCH 01/19] Committing current work for haproxy module --- metricbeat/docs/fields.asciidoc | 29 +++++++ metricbeat/docs/modules/haproxy.asciidoc | 37 ++++++++ metricbeat/docs/modules/haproxy/stat.asciidoc | 19 ++++ metricbeat/etc/beat.full.yml | 8 ++ metricbeat/etc/beat.yml | 8 ++ metricbeat/etc/fields.yml | 18 ++++ metricbeat/include/list.go | 46 +++++----- metricbeat/logs/metricbeat | 10 +++ metricbeat/metricbeat.full.yml | 8 ++ metricbeat/metricbeat.template-es2x.json | 13 +++ metricbeat/metricbeat.template.json | 12 +++ metricbeat/metricbeat.yml | 9 ++ metricbeat/module/haproxy/_meta/config.yml | 6 ++ metricbeat/module/haproxy/_meta/docs.asciidoc | 4 + metricbeat/module/haproxy/_meta/fields.yml | 9 ++ metricbeat/module/haproxy/doc.go | 4 + .../module/haproxy/stat/_meta/data.json | 19 ++++ .../module/haproxy/stat/_meta/docs.asciidoc | 3 + .../module/haproxy/stat/_meta/fields.yml | 9 ++ metricbeat/module/haproxy/stat/data.go | 77 ++++++++++++++++ metricbeat/module/haproxy/stat/stat.go | 87 +++++++++++++++++++ 21 files changed, 413 insertions(+), 22 deletions(-) create mode 100644 metricbeat/docs/modules/haproxy.asciidoc create mode 100644 metricbeat/docs/modules/haproxy/stat.asciidoc create mode 100644 metricbeat/logs/metricbeat create mode 100644 metricbeat/module/haproxy/_meta/config.yml create mode 100644 metricbeat/module/haproxy/_meta/docs.asciidoc create mode 100644 metricbeat/module/haproxy/_meta/fields.yml create mode 100644 metricbeat/module/haproxy/doc.go create mode 100644 metricbeat/module/haproxy/stat/_meta/data.json create mode 100644 metricbeat/module/haproxy/stat/_meta/docs.asciidoc create mode 100644 metricbeat/module/haproxy/stat/_meta/fields.yml create mode 100644 metricbeat/module/haproxy/stat/data.go create mode 100644 metricbeat/module/haproxy/stat/stat.go diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 73afcb1556f..04834345ff5 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -15,6 +15,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -443,6 +444,34 @@ required: True The document type. Always set to "metricsets". +[[exported-fields-haproxy]] +== haproxy Fields + +haproxy Module + + + +[float] +== haproxy Fields + + + + +[float] +== stat Fields + +stat + + + +[float] +=== haproxy.stat.example + +type: keyword + +Example field + + [[exported-fields-mongodb]] == MongoDB Fields diff --git a/metricbeat/docs/modules/haproxy.asciidoc b/metricbeat/docs/modules/haproxy.asciidoc new file mode 100644 index 00000000000..f5b6fb26885 --- /dev/null +++ b/metricbeat/docs/modules/haproxy.asciidoc @@ -0,0 +1,37 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-haproxy]] +== haproxy Module + +This is the haproxy Module. + + + +[float] +=== Example Configuration + +The haproxy module supports the standard configuration options that are described +in <>. 
Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 1s + hosts: ["localhost"] + +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +include::haproxy/stat.asciidoc[] + diff --git a/metricbeat/docs/modules/haproxy/stat.asciidoc b/metricbeat/docs/modules/haproxy/stat.asciidoc new file mode 100644 index 00000000000..a6ff8f587e1 --- /dev/null +++ b/metricbeat/docs/modules/haproxy/stat.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-haproxy-stat]] +include::../../../module/haproxy/stat/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/haproxy/stat/_meta/data.json[] +---- diff --git a/metricbeat/etc/beat.full.yml b/metricbeat/etc/beat.full.yml index 83d8124b705..a20b821ffed 100644 --- a/metricbeat/etc/beat.full.yml +++ b/metricbeat/etc/beat.full.yml @@ -61,6 +61,14 @@ metricbeat.modules: # Password of hosts. Empty by default #password: test123 +#------------------------------- haproxy Module ------------------------------ +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 1s + hosts: ["localhost"] + + #------------------------------- MongoDB Module ------------------------------ #- module: mongodb #metricsets: ["status"] diff --git a/metricbeat/etc/beat.yml b/metricbeat/etc/beat.yml index 15421af9fe4..f8052802367 100644 --- a/metricbeat/etc/beat.yml +++ b/metricbeat/etc/beat.yml @@ -43,4 +43,12 @@ metricbeat.modules: # if true, exports the CPU usage in ticks, together with the percentage values cpu_ticks: false +#------------------------------- haproxy Module ------------------------------ +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 1s + hosts: ["localhost"] + + diff --git a/metricbeat/etc/fields.yml b/metricbeat/etc/fields.yml index 257290dd5db..2e53d8aad5e 100644 --- a/metricbeat/etc/fields.yml +++ b/metricbeat/etc/fields.yml @@ -205,6 +205,24 @@ type: integer description: > Total. 
+- key: haproxy + title: "haproxy" + description: > + haproxy Module + fields: + - name: haproxy + type: group + description: > + fields: + - name: stat + type: group + description: > + stat + fields: + - name: example + type: keyword + description: > + Example field - key: mongodb title: "MongoDB" description: > diff --git a/metricbeat/include/list.go b/metricbeat/include/list.go index d785586d17e..57320029917 100644 --- a/metricbeat/include/list.go +++ b/metricbeat/include/list.go @@ -8,26 +8,28 @@ package include import ( // This list is automatically generated by `make imports` - _ "github.com/elastic/beats/metricbeat/module/apache" - _ "github.com/elastic/beats/metricbeat/module/apache/status" - _ "github.com/elastic/beats/metricbeat/module/mongodb" - _ "github.com/elastic/beats/metricbeat/module/mongodb/status" - _ "github.com/elastic/beats/metricbeat/module/mysql" - _ "github.com/elastic/beats/metricbeat/module/mysql/status" - _ "github.com/elastic/beats/metricbeat/module/nginx" - _ "github.com/elastic/beats/metricbeat/module/nginx/stubstatus" - _ "github.com/elastic/beats/metricbeat/module/redis" - _ "github.com/elastic/beats/metricbeat/module/redis/info" - _ "github.com/elastic/beats/metricbeat/module/redis/keyspace" - _ "github.com/elastic/beats/metricbeat/module/system" - _ "github.com/elastic/beats/metricbeat/module/system/core" - _ "github.com/elastic/beats/metricbeat/module/system/cpu" - _ "github.com/elastic/beats/metricbeat/module/system/diskio" - _ "github.com/elastic/beats/metricbeat/module/system/filesystem" - _ "github.com/elastic/beats/metricbeat/module/system/fsstat" - _ "github.com/elastic/beats/metricbeat/module/system/memory" - _ "github.com/elastic/beats/metricbeat/module/system/network" - _ "github.com/elastic/beats/metricbeat/module/system/process" - _ "github.com/elastic/beats/metricbeat/module/zookeeper" - _ "github.com/elastic/beats/metricbeat/module/zookeeper/mntr" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/apache" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/apache/status" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/haproxy" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/haproxy/stat" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/mongodb" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/mongodb/status" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/mysql" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/mysql/status" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/nginx" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/nginx/stubstatus" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/redis" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/redis/info" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/redis/keyspace" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/core" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/cpu" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/diskio" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/filesystem" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/fsstat" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/memory" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/network" + _ 
"vre/Documents/code/github_personal/beats/metricbeat/module/system/process" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/zookeeper" + _ "vre/Documents/code/github_personal/beats/metricbeat/module/zookeeper/mntr" ) diff --git a/metricbeat/logs/metricbeat b/metricbeat/logs/metricbeat new file mode 100644 index 00000000000..ee4186e2116 --- /dev/null +++ b/metricbeat/logs/metricbeat @@ -0,0 +1,10 @@ +2016-08-10T19:06:01-04:00 INFO Metrics logging every 30s +2016-08-10T19:06:01-04:00 INFO Loading template enabled. Reading template file: /Users/alain.lefebvre/Documents/code/github_personal/beats/metricbeat/metricbeat.template.json +2016-08-10T19:06:01-04:00 INFO Loading template enabled for Elasticsearch 2.x. Reading template file: /Users/alain.lefebvre/Documents/code/github_personal/beats/metricbeat/metricbeat.template-es2x.json +2016-08-10T19:06:01-04:00 INFO Elasticsearch url: http://localhost:9200 +2016-08-10T19:06:01-04:00 INFO Activated elasticsearch as output plugin. +2016-08-10T19:06:01-04:00 INFO Publisher name: Alain-L-MBP +2016-08-10T19:06:01-04:00 INFO Flush Interval set to: 1s +2016-08-10T19:06:01-04:00 INFO Max Bulk Size set to: 50 +2016-08-10T19:06:01-04:00 INFO Register [ModuleFactory:[system], MetricSetFactory:[apache/status, mongodb/status, mysql/status, nginx/stubstatus, redis/info, redis/keyspace, system/core, system/cpu, system/filesystem, system/fsstat, system/load, system/memory, system/network, system/process, zookeeper/mntr]] +2016-08-10T19:06:01-04:00 CRIT Exiting: 1 error: metricset 'haproxy/stat' is not registered, module not found diff --git a/metricbeat/metricbeat.full.yml b/metricbeat/metricbeat.full.yml index 9f18daa46fa..2b92f670d85 100644 --- a/metricbeat/metricbeat.full.yml +++ b/metricbeat/metricbeat.full.yml @@ -61,6 +61,14 @@ metricbeat.modules: # Password of hosts. 
Empty by default #password: test123 +#------------------------------- haproxy Module ------------------------------ +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 1s + hosts: ["localhost"] + + #------------------------------- MongoDB Module ------------------------------ #- module: mongodb #metricsets: ["status"] diff --git a/metricbeat/metricbeat.template-es2x.json b/metricbeat/metricbeat.template-es2x.json index 1dc8445a080..39df7398992 100644 --- a/metricbeat/metricbeat.template-es2x.json +++ b/metricbeat/metricbeat.template-es2x.json @@ -177,6 +177,19 @@ } } }, + "haproxy": { + "properties": { + "stat": { + "properties": { + "example": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + } + } + }, "metricset": { "properties": { "host": { diff --git a/metricbeat/metricbeat.template.json b/metricbeat/metricbeat.template.json index 92c51d71756..dc5eabb39aa 100644 --- a/metricbeat/metricbeat.template.json +++ b/metricbeat/metricbeat.template.json @@ -171,6 +171,18 @@ } } }, + "haproxy": { + "properties": { + "stat": { + "properties": { + "example": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, "metricset": { "properties": { "host": { diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index e2fc374e0e9..9c58871565f 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -45,6 +45,15 @@ metricbeat.modules: +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 5s + hosts: ["localhost"] + stats_method: "unix_socket" + stats_path: "/Users/alain.lefebvre/tmp/haproxy.sock" + + #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group diff --git a/metricbeat/module/haproxy/_meta/config.yml b/metricbeat/module/haproxy/_meta/config.yml new file mode 100644 index 00000000000..e5958517958 --- /dev/null +++ b/metricbeat/module/haproxy/_meta/config.yml @@ -0,0 +1,6 @@ +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 1s + hosts: ["localhost"] + diff --git a/metricbeat/module/haproxy/_meta/docs.asciidoc b/metricbeat/module/haproxy/_meta/docs.asciidoc new file mode 100644 index 00000000000..1605700b6de --- /dev/null +++ b/metricbeat/module/haproxy/_meta/docs.asciidoc @@ -0,0 +1,4 @@ +== haproxy Module + +This is the haproxy Module. + diff --git a/metricbeat/module/haproxy/_meta/fields.yml b/metricbeat/module/haproxy/_meta/fields.yml new file mode 100644 index 00000000000..b3b1127c157 --- /dev/null +++ b/metricbeat/module/haproxy/_meta/fields.yml @@ -0,0 +1,9 @@ +- key: haproxy + title: "haproxy" + description: > + haproxy Module + fields: + - name: haproxy + type: group + description: > + fields: diff --git a/metricbeat/module/haproxy/doc.go b/metricbeat/module/haproxy/doc.go new file mode 100644 index 00000000000..019acc1f634 --- /dev/null +++ b/metricbeat/module/haproxy/doc.go @@ -0,0 +1,4 @@ +/* +Package haproxy is a Metricbeat module that contains MetricSets. 
+*/ +package haproxy diff --git a/metricbeat/module/haproxy/stat/_meta/data.json b/metricbeat/module/haproxy/stat/_meta/data.json new file mode 100644 index 00000000000..8659dd508dc --- /dev/null +++ b/metricbeat/module/haproxy/stat/_meta/data.json @@ -0,0 +1,19 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "beat":{ + "hostname":"beathost", + "name":"beathost" + }, + "metricset":{ + "host":"localhost", + "module":"mysql", + "name":"status", + "rtt":44269 + }, + "haproxy":{ + "stat":{ + "example": "stat" + } + }, + "type":"metricsets" +} diff --git a/metricbeat/module/haproxy/stat/_meta/docs.asciidoc b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc new file mode 100644 index 00000000000..625d172bb92 --- /dev/null +++ b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc @@ -0,0 +1,3 @@ +=== haproxy stat MetricSet + +This is the stat metricset of the module haproxy. diff --git a/metricbeat/module/haproxy/stat/_meta/fields.yml b/metricbeat/module/haproxy/stat/_meta/fields.yml new file mode 100644 index 00000000000..7fa44ea0e91 --- /dev/null +++ b/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -0,0 +1,9 @@ +- name: stat + type: group + description: > + stat + fields: + - name: example + type: keyword + description: > + Example field diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go new file mode 100644 index 00000000000..b090c129e7f --- /dev/null +++ b/metricbeat/module/haproxy/stat/data.go @@ -0,0 +1,77 @@ +package stat + +import ( + "github.com/elastic/beats/libbeat/common" + s "github.com/elastic/beats/metricbeat/schema" + c "github.com/elastic/beats/metricbeat/schema/mapstrstr" + "strings" +) + +var ( + schema = s.Schema{ + "nb_proc": c.Int("Nbproc"), + "process_num": c.Int("Process_num"), + "pid": c.Int("Pid"), + "uptime_sec": c.Int("Uptime_sec"), + "mem_max_mb": c.Int("Memmax_MB"), + "ulimit_n": c.Int("Ulimit-n"), + "max_sock": c.Int("Maxsock"), + "max_conn": c.Int("Maxconn"), + "hard_max_conn": c.Init("Hard_maxconn"), + "curr_conns": c.Init("CurrConns"), + "cum_conns": c.Init("CumConns"), + "cum_req": c.Init("CumReq"), + "max_ssl_conns": c.Init("MaxSslConns"), + "curr_ssl_conns": c.Init("CurrSslConns"), + "cum_ssl_conns": c.Init("CumSslConns"), + "max_pipes": c.Init("Maxpipes"), + "pipes_used": c.Init("PipesUsed"), + "pipes_free": c.Init("PipesFree"), + "conn_rate": c.Init("ConnRate"), + "conn_rate_limit": c.Init("ConnRateLimit"), + "max_conn_rate": c.Init("MaxConnRate"), + "sess_rate": c.Init("SessRate"), + "sess_rate_limit": c.Init("SessRateLimit"), + "max_sess_rate": c.Init("MaxSessRate"), + "ssl_rate": c.Init("SslRate"), + "ssl_rate_limit": c.Init("SslRateLimit"), + "max_ssl_rate": c.Init("MaxSslRate"), + "ssl_frontend_key_rate": c.Init("SslFrontendKeyRate"), + "ssl_frontend_max_key_rate": c.Init("SslFrontendMaxKeyRate"), + "ssl_frontend_session_reuse_pct": c.Init("SslFrontendSessionReuse_pct"), + "ssl_babckend_key_rate": c.Init("SslBackendKeyRate"), + "ssl_backend_max_key_rate": c.Init("SslBackendMaxKeyRate"), + "ssl_cached_lookups": c.Init("SslCacheLookups"), + "ssl_cache_misses": c.Init("SslCacheMisses"), + "compress_bps_in": c.Init("CompressBpsIn"), + "compress_bps_out": c.Init("CompressBpsOut"), + "compress_bps_rate_limit": c.Init("CompressBpsRateLim"), + "zlib_mem_usage": c.Init("ZlibMemUsage"), + "max_zlib_mem_usage": c.Init("MaxZlibMemUsage"), + "tasks": c.Init("Tasks"), + "run_queue": c.Init("Run_queue"), + "idle_pct": c.Init("Idle_pct"), + } +) + +func parseResponse(data []byte) map[string]string { + resultMap := 
map[string]string{} + str := string(data) + for _, ln := range strings.Split(str, "\n") { + parts := strings.Split(strings.Trim(ln, " "), ":") + if parts[0] == "Name" || parts[0] == "Version" || parts[0] == "Release_date" || parts[0] == "Uptime" { + continue + } + resultMap[parts[0]] = strings.Trim(parts[1], " ") + } +} + +// Map data to MapStr +func eventMapping(info map[string]string) common.MapStr { + // Full mapping from info + source := map[string]interface{}{} + for key, val := range info { + source[key] = val + } + return schema.Apply(source) +} diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go new file mode 100644 index 00000000000..01b215505c8 --- /dev/null +++ b/metricbeat/module/haproxy/stat/stat.go @@ -0,0 +1,87 @@ +package stat + +import ( + "fmt" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/mb" + s "github.com/elastic/beats/metricbeat/schema" + c "github.com/elastic/beats/metricbeat/schema/mapstrstr" +) + +const ( + // defaultSocket is the default path to the unix socket tfor stats on haproxy. + statsMethod = "stat" + defaultSocket = "/var/lib/haproxy/stats" +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("haproxy", "stat", New); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + statsMethod string + statsPath string + counter int +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + + //config := struct{}{} + + config := struct { + StatsMethod string `config:"stats_method"` + StatsPath string `config:"stats_path"` + }{ + StatsMethod: "unix_socket", + StatsPath: defaultSocket, + } + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + statsMethod: config.StatsMethod, + statsPath: config.StatsPath, + counter: 1, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. +func (m *MetricSet) Fetch() (common.MapStr, error) { + + var output []byte + if m.statsMethod == "unix_socket" { + c, err := net.Dial("unix", config.StatsSocket) + buf := make([]byte, 4096) + + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) + } + + _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) + oputut, err := c.Read(buf) + + } else { + + } + + m.counter++ + + return eventMapping(parseResponse(output)), nil +} From 7b4c97d17b6329497ab9784a9880b02883a48db7 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Sat, 13 Aug 2016 08:07:59 -0400 Subject: [PATCH 02/19] Committing current changes. 
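Note on the Fetch implementation in PATCH 01 above: it dials the stats socket but reads the reply with a single fixed-size Read into an unused buffer, references an undefined `config.StatsSocket`, and misspells the `output` variable, so it cannot compile as committed. For reference, a minimal self-contained sketch of the intended round trip; the socket path and command here are illustrative assumptions, not taken from the patch:

[source,go]
----
// Minimal sketch of a complete stats-socket round trip (illustrative
// socket path; not part of the patch). HAProxy accepts one command per
// connection and closes the connection when the output is complete, so
// copying until EOF captures the whole response.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net"
)

func runCommand(socketPath, cmd string) (string, error) {
	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		return "", fmt.Errorf("HAProxy dial error: %s", err)
	}
	defer conn.Close()

	// Commands are terminated by a newline.
	if _, err := conn.Write([]byte(cmd + "\n")); err != nil {
		return "", fmt.Errorf("socket write error: %s", err)
	}

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, conn); err != nil {
		return "", fmt.Errorf("socket read error: %s", err)
	}
	return buf.String(), nil
}

func main() {
	out, err := runCommand("/var/lib/haproxy/stats", "show stat")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(out)
}
----

Reading until EOF matters because `show stat` output can exceed a single 4 KB read on proxies with many backends; the Client introduced in PATCH 03 below takes the same io.Copy approach.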
--- metricbeat/include/list.go | 49 ++-- metricbeat/metricbeat.yml | 14 +- .../module/haproxy/info/_meta/data.json | 19 ++ .../module/haproxy/info/_meta/docs.asciidoc | 3 + .../module/haproxy/info/_meta/fields.yml | 9 + metricbeat/module/haproxy/info/data.go | 90 ++++++ metricbeat/module/haproxy/info/info.go | 99 +++++++ .../module/haproxy/stat/_meta/docs.asciidoc | 2 +- .../module/haproxy/stat/_meta/fields.yml | 258 +++++++++++++++++- metricbeat/module/haproxy/stat/data.go | 161 +++++++---- metricbeat/module/haproxy/stat/stat.go | 39 ++- 11 files changed, 644 insertions(+), 99 deletions(-) create mode 100644 metricbeat/module/haproxy/info/_meta/data.json create mode 100644 metricbeat/module/haproxy/info/_meta/docs.asciidoc create mode 100644 metricbeat/module/haproxy/info/_meta/fields.yml create mode 100644 metricbeat/module/haproxy/info/data.go create mode 100644 metricbeat/module/haproxy/info/info.go diff --git a/metricbeat/include/list.go b/metricbeat/include/list.go index 57320029917..340b8cc15d5 100644 --- a/metricbeat/include/list.go +++ b/metricbeat/include/list.go @@ -8,28 +8,29 @@ package include import ( // This list is automatically generated by `make imports` - _ "vre/Documents/code/github_personal/beats/metricbeat/module/apache" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/apache/status" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/haproxy" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/haproxy/stat" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/mongodb" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/mongodb/status" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/mysql" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/mysql/status" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/nginx" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/nginx/stubstatus" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/redis" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/redis/info" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/redis/keyspace" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/core" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/cpu" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/diskio" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/filesystem" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/fsstat" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/memory" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/network" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/system/process" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/zookeeper" - _ "vre/Documents/code/github_personal/beats/metricbeat/module/zookeeper/mntr" + _ "github.com/elastic/beats/metricbeat/module/apache" + _ "github.com/elastic/beats/metricbeat/module/apache/status" + _ "github.com/elastic/beats/metricbeat/module/haproxy" + _ "github.com/elastic/beats/metricbeat/module/haproxy/info" + _ "github.com/elastic/beats/metricbeat/module/haproxy/stat" + _ "github.com/elastic/beats/metricbeat/module/mongodb" + _ "github.com/elastic/beats/metricbeat/module/mongodb/status" + _ "github.com/elastic/beats/metricbeat/module/mysql" + _ 
"github.com/elastic/beats/metricbeat/module/mysql/status" + _ "github.com/elastic/beats/metricbeat/module/nginx" + _ "github.com/elastic/beats/metricbeat/module/nginx/stubstatus" + _ "github.com/elastic/beats/metricbeat/module/redis" + _ "github.com/elastic/beats/metricbeat/module/redis/info" + _ "github.com/elastic/beats/metricbeat/module/redis/keyspace" + _ "github.com/elastic/beats/metricbeat/module/system" + _ "github.com/elastic/beats/metricbeat/module/system/core" + _ "github.com/elastic/beats/metricbeat/module/system/cpu" + _ "github.com/elastic/beats/metricbeat/module/system/diskio" + _ "github.com/elastic/beats/metricbeat/module/system/filesystem" + _ "github.com/elastic/beats/metricbeat/module/system/fsstat" + _ "github.com/elastic/beats/metricbeat/module/system/memory" + _ "github.com/elastic/beats/metricbeat/module/system/network" + _ "github.com/elastic/beats/metricbeat/module/system/process" + _ "github.com/elastic/beats/metricbeat/module/zookeeper" + _ "github.com/elastic/beats/metricbeat/module/zookeeper/mntr" ) diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 9c58871565f..f66c95f1061 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -36,24 +36,26 @@ metricbeat.modules: # Per process stats - process - enabled: true + enabled: false period: 10s processes: ['.*'] # if true, exports the CPU usage in ticks, together with the percentage values cpu_ticks: false - - +#------------------------------- haproxy Module ------------------------------ - module: haproxy - metricsets: ["stat"] + metricsets: + - "stat" + - "info" enabled: true - period: 5s - hosts: ["localhost"] + period: 10s stats_method: "unix_socket" stats_path: "/Users/alain.lefebvre/tmp/haproxy.sock" + + #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group diff --git a/metricbeat/module/haproxy/info/_meta/data.json b/metricbeat/module/haproxy/info/_meta/data.json new file mode 100644 index 00000000000..5f81cf1613a --- /dev/null +++ b/metricbeat/module/haproxy/info/_meta/data.json @@ -0,0 +1,19 @@ +{ + "@timestamp":"2016-05-23T08:05:34.853Z", + "beat":{ + "hostname":"beathost", + "name":"beathost" + }, + "metricset":{ + "host":"localhost", + "module":"mysql", + "name":"status", + "rtt":44269 + }, + "haproxy":{ + "info":{ + "example": "info" + } + }, + "type":"metricsets" +} diff --git a/metricbeat/module/haproxy/info/_meta/docs.asciidoc b/metricbeat/module/haproxy/info/_meta/docs.asciidoc new file mode 100644 index 00000000000..6725bc19c53 --- /dev/null +++ b/metricbeat/module/haproxy/info/_meta/docs.asciidoc @@ -0,0 +1,3 @@ +=== haproxy stat MetricSet + +This is the info metricset of the module haproxy. 
diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml new file mode 100644 index 00000000000..85add22af1b --- /dev/null +++ b/metricbeat/module/haproxy/info/_meta/fields.yml @@ -0,0 +1,9 @@ +- name: info + type: group + description: > + info + fields: + - name: example + type: keyword + description: > + Example field diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go new file mode 100644 index 00000000000..aaa3c79a523 --- /dev/null +++ b/metricbeat/module/haproxy/info/data.go @@ -0,0 +1,90 @@ +package info + +import ( + "github.com/elastic/beats/libbeat/common" + s "github.com/elastic/beats/metricbeat/schema" + c "github.com/elastic/beats/metricbeat/schema/mapstrstr" + "strings" +) + +var ( + schema = s.Schema{ + "nb_proc": c.Int("Nbproc"), + "process_num": c.Int("Process_num"), + "pid": c.Int("Pid"), + "uptime_sec": c.Int("Uptime_sec"), + "mem_max_mb": c.Int("Memmax_MB"), + "ulimit_n": c.Int("Ulimit-n"), + "max_sock": c.Int("Maxsock"), + "max_conn": c.Int("Maxconn"), + "hard_max_conn": c.Int("Hard_maxconn"), + "curr_conns": c.Int("CurrConns"), + "cum_conns": c.Int("CumConns"), + "cum_req": c.Int("CumReq"), + "max_ssl_conns": c.Int("MaxSslConns"), + "curr_ssl_conns": c.Int("CurrSslConns"), + "cum_ssl_conns": c.Int("CumSslConns"), + "max_pipes": c.Int("Maxpipes"), + "pipes_used": c.Int("PipesUsed"), + "pipes_free": c.Int("PipesFree"), + "conn_rate": c.Int("ConnRate"), + "conn_rate_limit": c.Int("ConnRateLimit"), + "max_conn_rate": c.Int("MaxConnRate"), + "sess_rate": c.Int("SessRate"), + "sess_rate_limit": c.Int("SessRateLimit"), + "max_sess_rate": c.Int("MaxSessRate"), + "ssl_rate": c.Int("SslRate"), + "ssl_rate_limit": c.Int("SslRateLimit"), + "max_ssl_rate": c.Int("MaxSslRate"), + "ssl_frontend_key_rate": c.Int("SslFrontendKeyRate"), + "ssl_frontend_max_key_rate": c.Int("SslFrontendMaxKeyRate"), + "ssl_frontend_session_reuse_pct": c.Int("SslFrontendSessionReuse_pct"), + "ssl_babckend_key_rate": c.Int("SslBackendKeyRate"), + "ssl_backend_max_key_rate": c.Int("SslBackendMaxKeyRate"), + "ssl_cached_lookups": c.Int("SslCacheLookups"), + "ssl_cache_misses": c.Int("SslCacheMisses"), + "compress_bps_in": c.Int("CompressBpsIn"), + "compress_bps_out": c.Int("CompressBpsOut"), + "compress_bps_rate_limit": c.Int("CompressBpsRateLim"), + "zlib_mem_usage": c.Int("ZlibMemUsage"), + "max_zlib_mem_usage": c.Int("MaxZlibMemUsage"), + "tasks": c.Int("Tasks"), + "run_queue": c.Int("Run_queue"), + "idle_pct": c.Int("Idle_pct"), + } +) + +func parseResponse(data []byte) map[string]string { + + resultMap := map[string]string{} + str := string(data) + + for _, ln := range strings.Split(str, "\n") { + + ln := strings.Trim(ln, " ") + if ln == "" { + continue + } + + parts := strings.Split(strings.Trim(ln, " "), ":") + if len(parts) != 2 { + continue + } + + if parts[0] == "Name" || parts[0] == "Version" || parts[0] == "Release_date" || parts[0] == "Uptime" || parts[0] == "node" || parts[0] == "description" { + continue + } + resultMap[parts[0]] = strings.Trim(parts[1], " ") + } + return resultMap +} + +// Map data to MapStr +func eventMapping(info map[string]string) common.MapStr { + // Full mapping from info + source := map[string]interface{}{} + for key, val := range info { + source[key] = val + } + return schema.Apply(source) +} diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go new file mode 100644 index 00000000000..10af7eef19d --- /dev/null +++ 
b/metricbeat/module/haproxy/info/info.go @@ -0,0 +1,99 @@ +package info + +import ( + "errors" + "fmt" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/mb" + "net" +) + +const ( + // defaultSocket is the default path to the unix socket tfor stats on haproxy. + statsMethod = "info" + defaultSocket = "/var/lib/haproxy/stats" +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("haproxy", "info", New); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + statsMethod string + statsPath string + counter int +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + + config := struct { + StatsMethod string `config:"stats_method"` + StatsPath string `config:"stats_path"` + }{ + StatsMethod: "unix_socket", + StatsPath: defaultSocket, + } + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + statsMethod: config.StatsMethod, + statsPath: config.StatsPath, + counter: 1, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. +func (m *MetricSet) Fetch() (common.MapStr, error) { + + if m.statsMethod == "unix_socket" { + + m.counter++ + c, err := net.Dial("unix", m.statsPath) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) + } + defer c.Close() + + // Write the command to the socket + _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) + if err != nil { + return nil, fmt.Errorf("Socket write error: %s", err) + } + + // Now read from the socket + buf := make([]byte, 2048) + for { + _, err := c.Read(buf[:]) + if err != nil { + return nil, err + } + return eventMapping(parseResponse(buf)), nil + } + + } else { + // Get the data from the HTTP URI + m.counter++ + + } + + return nil, errors.New("Error getting HAProxy info") + +} diff --git a/metricbeat/module/haproxy/stat/_meta/docs.asciidoc b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc index 625d172bb92..6725bc19c53 100644 --- a/metricbeat/module/haproxy/stat/_meta/docs.asciidoc +++ b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc @@ -1,3 +1,3 @@ === haproxy stat MetricSet -This is the stat metricset of the module haproxy. +This is the info metricset of the module haproxy. 
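`show stat`, by contrast, returns CSV: a `# pxname,svname,...` header row, then one row per proxy or service, with every line carrying a trailing comma. The hand-rolled parser in stat/data.go (below) keys each row by the header names and expects exactly 62 columns; here is a sketch of the same header-keyed parsing built on the standard encoding/csv package instead, with abridged, hypothetical sample rows:

[source,go]
----
// Sketch of header-keyed parsing of "show stat" CSV, the same idea as
// parseResponse in stat/data.go. FieldsPerRecord is relaxed because every
// HAProxy row carries a trailing comma, i.e. an empty last field.
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func parseStat(data string) ([]map[string]string, error) {
	r := csv.NewReader(strings.NewReader(data))
	r.FieldsPerRecord = -1
	rows, err := r.ReadAll()
	if err != nil {
		return nil, err
	}
	if len(rows) < 2 {
		return nil, fmt.Errorf("stat response has no data rows")
	}

	// The first row is the header: "# pxname,svname,...".
	header := rows[0]
	header[0] = strings.TrimPrefix(header[0], "# ")

	var results []map[string]string
	for _, row := range rows[1:] {
		entry := map[string]string{}
		for i, val := range row {
			if i < len(header) && header[i] != "" {
				entry[header[i]] = val
			}
		}
		results = append(results, entry)
	}
	return results, nil
}

func main() {
	// Two rows abridged from a real 62-column response.
	sample := "# pxname,svname,scur,status,\n" +
		"log,FRONTEND,0,OPEN,\n" +
		"log,log1,0,DOWN,\n"
	rows, _ := parseStat(sample)
	for _, row := range rows {
		fmt.Printf("%s/%s status=%s\n", row["pxname"], row["svname"], row["status"])
	}
}
----

Note that the `c.Read` into a fixed 2048-byte buffer in info.go above returns after a single read and passes the whole buffer, trailing zero bytes included, to parseResponse; reading until EOF, as in the earlier sketch, avoids both truncation and the stray bytes.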
diff --git a/metricbeat/module/haproxy/stat/_meta/fields.yml b/metricbeat/module/haproxy/stat/_meta/fields.yml index 7fa44ea0e91..947e3612db3 100644 --- a/metricbeat/module/haproxy/stat/_meta/fields.yml +++ b/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -3,7 +3,259 @@ description: > stat fields: - - name: example - type: keyword + - name: pxname + type: string + description: > + [DESC] + + - name: svname + type: string + description: > + [DESC] + + - name: qcur + type: integer + description: > + Currrent queued requess + + - name: qmax + type: integer + description: > + Max queued requests + + - name: scur + type: integer + description: > + Current sessions + + - name: smax + type: integer + description: > + Max sessions + + - name: slim + type: integer + description: > + Session limit + + - name: stot + type: string + description: > + Total sessions + + - name: bin + type: integer + description: > + Bytes in + + - name: bout + type: integer + description: > + Bytes out + + - name: dreq + type: integer + description: > + Denied Requests + + - name: dresp + type: integer + description: > + Denied Responses + + - name: ereq + type: integer + description: > + Request Errors + + - name: econ + type: string + description: > + + - name: eresp + type: integer + description: > + + - name: wretr + type: integer + description: > + + - name: wredis + type: integer + description: > + + - name: status + type: integer + description: > + + - name: weight + type: string + description: > + + - name: act + type: integer + description: > + + - name: bck + type: integer + description: > + + - name: chkfail + type: integer + description: > + + - name: chkdown + type: integer + description: > + + - name: lastchg + type: string + description: > + + - name: downtime + type: integer + description: > + + - name: qlimit + type: integer + description: > + + - name: pid + type: integer + description: > + + - name: iid + type: integer + description: > + + - name: sid + type: string + description: > + + - name: throttle + type: integer + description: > + + - name: lbtot + type: integer + description: > + + - name: tracked + type: integer + description: > + + - name: type + type: integer + description: > + + - name: rate + type: integer + description: > + + - name: rate_lim + type: integer + description: > + + - name: rate_max + type: integer + description: > + + - name: check_status + type: integer + description: > + + - name: check_code + type: integer + description: > + + - name: check_duration + type: integer + description: > + + - name: hrsp_2xx + type: integer + description: > + + - name: hrsp_3xx + type: integer + description: > + + - name: hrsp_4xx + type: integer + description: > + + - name: hrsp_5xx + type: integer + description: > + + - name: hrsp_other + type: integer + description: > + + - name: hanafail + type: integer + description: > + + - name: req_rate + type: integer + description: > + + - name: req_rate_max + type: integer + description: > + + - name: req_tot + type: integer + description: > + + - name: cli_abrt + type: integer + description: > + + - name: srv_abrt + type: integer + description: > + + - name: comp_in + type: integer + description: > + + - name: comp_out + type: integer + description: > + + - name: comp_byp + type: integer + description: > + + - name: comp_rsp + type: integer + description: > + + - name: lastsess + type: integer + description: > + + - name: last_chk + type: integer + description: > + + - name: last_agt + type: integer + description: > + + - name: qtime + type: 
integer + description: > + + - name: ctime + type: integer + description: > + + - name: rtime + type: integer + description: > + + - name: ttime + type: integer description: > - Example field diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go index b090c129e7f..2d77164f18e 100644 --- a/metricbeat/module/haproxy/stat/data.go +++ b/metricbeat/module/haproxy/stat/data.go @@ -9,69 +9,124 @@ import ( var ( schema = s.Schema{ - "nb_proc": c.Int("Nbproc"), - "process_num": c.Int("Process_num"), - "pid": c.Int("Pid"), - "uptime_sec": c.Int("Uptime_sec"), - "mem_max_mb": c.Int("Memmax_MB"), - "ulimit_n": c.Int("Ulimit-n"), - "max_sock": c.Int("Maxsock"), - "max_conn": c.Int("Maxconn"), - "hard_max_conn": c.Init("Hard_maxconn"), - "curr_conns": c.Init("CurrConns"), - "cum_conns": c.Init("CumConns"), - "cum_req": c.Init("CumReq"), - "max_ssl_conns": c.Init("MaxSslConns"), - "curr_ssl_conns": c.Init("CurrSslConns"), - "cum_ssl_conns": c.Init("CumSslConns"), - "max_pipes": c.Init("Maxpipes"), - "pipes_used": c.Init("PipesUsed"), - "pipes_free": c.Init("PipesFree"), - "conn_rate": c.Init("ConnRate"), - "conn_rate_limit": c.Init("ConnRateLimit"), - "max_conn_rate": c.Init("MaxConnRate"), - "sess_rate": c.Init("SessRate"), - "sess_rate_limit": c.Init("SessRateLimit"), - "max_sess_rate": c.Init("MaxSessRate"), - "ssl_rate": c.Init("SslRate"), - "ssl_rate_limit": c.Init("SslRateLimit"), - "max_ssl_rate": c.Init("MaxSslRate"), - "ssl_frontend_key_rate": c.Init("SslFrontendKeyRate"), - "ssl_frontend_max_key_rate": c.Init("SslFrontendMaxKeyRate"), - "ssl_frontend_session_reuse_pct": c.Init("SslFrontendSessionReuse_pct"), - "ssl_babckend_key_rate": c.Init("SslBackendKeyRate"), - "ssl_backend_max_key_rate": c.Init("SslBackendMaxKeyRate"), - "ssl_cached_lookups": c.Init("SslCacheLookups"), - "ssl_cache_misses": c.Init("SslCacheMisses"), - "compress_bps_in": c.Init("CompressBpsIn"), - "compress_bps_out": c.Init("CompressBpsOut"), - "compress_bps_rate_limit": c.Init("CompressBpsRateLim"), - "zlib_mem_usage": c.Init("ZlibMemUsage"), - "max_zlib_mem_usage": c.Init("MaxZlibMemUsage"), - "tasks": c.Init("Tasks"), - "run_queue": c.Init("Run_queue"), - "idle_pct": c.Init("Idle_pct"), + "pxname": c.Str("pxname"), + "svname": c.Str("svname"), + "qcur": c.Int("qcur"), + "qmax": c.Int("qmax"), + "scur": c.Int("scur"), + "smax": c.Int("smax"), + "slim": c.Int("slim"), + "stot": c.Int("stot"), + "bin": c.Int("bin"), + "bout": c.Int("bout"), + "dreq": c.Int("dreq"), + "dresp": c.Int("dresp"), + "ereq": c.Int("ereq"), + "econ": c.Int("econ"), + "eresp": c.Int("eresp"), + "wretr": c.Int("wretr"), + "wredis": c.Int("wredis"), + "status": c.Str("status"), + "weight": c.Int("weight"), + "act": c.Int("act"), + "bck": c.Int("bck"), + "chkfail": c.Int("chkfail"), + "chkdown": c.Int("chkdown"), + "lastchg": c.Int("lastchg"), + "downtime": c.Int("downtime"), + "qlimit": c.Int("qlimit"), + "pid": c.Int("pid"), + "iid": c.Int("iid"), + "sid": c.Int("sid"), + "throttle": c.Int("throttle"), + "lbtot": c.Int("lbtot"), + "tracked": c.Int("tracked"), + "type": c.Int("type"), + "rate": c.Int("rate"), + "rate_lim": c.Int("rate_lim"), + "rate_max": c.Int("rate_max"), + "check_status": c.Str("check_status"), + "check_code": c.Int("check_code"), + "check_duration": c.Int("check_duration"), + "hrsp_1xx": c.Int("hrsp_1xx"), + "hrsp_2xx": c.Int("hrsp_2xx"), + "hrsp_3xx": c.Int("hrsp_3xx"), + "hrsp_4xx": c.Int("hrsp_4xx"), + "hrsp_5xx": c.Int("hrsp_5xx"), + "hrsp_other": c.Int("hrsp_other"), + "hanafail": 
c.Int("hanafail"), + "req_rate": c.Int("req_rate"), + "req_rate_max": c.Int("req_rate_max"), + "req_tot": c.Int("req_tot"), + "cli_abrt": c.Int("cli_abrt"), + "srv_abrt": c.Int("srv_abrt"), + "comp_in": c.Int("comp_in"), + "comp_out": c.Int("comp_out"), + "comp_byp": c.Int("comp_byp"), + "comp_rsp": c.Int("comp_rsp"), + "lastsess": c.Int("lastsess"), + "last_chk": c.Str("last_chk"), + "last_agt": c.Int("last_agt"), + "qtime": c.Int("qtime"), + "ctime": c.Int("ctime"), + "rtime": c.Int("rtime"), + "ttime": c.Int("ttime"), } ) -func parseResponse(data []byte) map[string]string { - resultMap := map[string]string{} +func parseResponse(data []byte) []map[string]string { + + var results []map[string]string + str := string(data) - for _, ln := range strings.Split(str, "\n") { - parts := strings.Split(strings.Trim(ln, " "), ":") - if parts[0] == "Name" || parts[0] == "Version" || parts[0] == "Release_date" || parts[0] == "Uptime" { + fieldNames := []string{} + + for lnNum, ln := range strings.Split(str, "\n") { + + // If the line by any chance is empty, then skip it + ln := strings.Trim(ln, " ") + if ln == "" { + continue + } + + // Now split the line on each comma and if there isn + ln = strings.Trim(ln, ",") + parts := strings.Split(strings.Trim(ln, " "), ",") + if len(parts) != 62 { + continue + } + + // For the first row, keep the column names and continue + if lnNum == 0 { + fieldNames = parts continue } - resultMap[parts[0]] = strings.Trim(parts[1], " ") + + res := map[string]string{} + for i, v := range parts { + res[fieldNames[i]] = v + } + + results = append(results, res) + } + return results } // Map data to MapStr -func eventMapping(info map[string]string) common.MapStr { - // Full mapping from info +func eventMapping(info []map[string]string) []common.MapStr { + + var events []common.MapStr + source := map[string]interface{}{} - for key, val := range info { - source[key] = val + + for _, evt := range info { + source = map[string]interface{}{} + for key, val := range evt { + source[key] = val + } + events = append(events, schema.Apply(source)) } - return schema.Apply(source) + + return events } diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go index 01b215505c8..2ad65618c90 100644 --- a/metricbeat/module/haproxy/stat/stat.go +++ b/metricbeat/module/haproxy/stat/stat.go @@ -1,11 +1,11 @@ package stat import ( + "errors" "fmt" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/metricbeat/mb" - s "github.com/elastic/beats/metricbeat/schema" - c "github.com/elastic/beats/metricbeat/schema/mapstrstr" + "net" ) const ( @@ -17,7 +17,7 @@ const ( // init registers the MetricSet with the central registry. // The New method will be called after the setup of the module and before starting to fetch data func init() { - if err := mb.Registry.AddMetricSet("haproxy", "stat", New); err != nil { + if err := mb.Registry.AddMetricSet("haproxy", statsMethod, New); err != nil { panic(err) } } @@ -38,8 +38,6 @@ type MetricSet struct { // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - //config := struct{}{} - config := struct { StatsMethod string `config:"stats_method"` StatsPath string `config:"stats_path"` @@ -63,25 +61,42 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right format // It returns the event which is then forward to the output. In case of an error, a // descriptive error must be returned. 
-func (m *MetricSet) Fetch() (common.MapStr, error) { +func (m *MetricSet) Fetch() ([]common.MapStr, error) { + + //var metricSetSlice []common.MapStr - var output []byte if m.statsMethod == "unix_socket" { - c, err := net.Dial("unix", config.StatsSocket) - buf := make([]byte, 4096) + m.counter++ + + c, err := net.Dial("unix", m.statsPath) if err != nil { return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) } + defer c.Close() + // Write the command to the socket _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) - oputut, err := c.Read(buf) + if err != nil { + return nil, fmt.Errorf("Socket write error: %s", err) + } + + // Now read from the socket + buf := make([]byte, 2048) + for { + _, err := c.Read(buf[:]) + if err != nil { + return nil, err + } + return eventMapping(parseResponse(buf)), nil + } } else { + // Get the data from the HTTP URI + m.counter++ } - m.counter++ + return nil, errors.New("Error getting HAProxy stat") - return eventMapping(parseResponse(output)), nil } From 8d9d696f25babef666f61fc003abb0d38525efd0 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Tue, 16 Aug 2016 06:41:39 -0400 Subject: [PATCH 03/19] Added haproxy package to fetch stat/info commands --- metricbeat/module/haproxy/haproxy.go | 219 +++++++++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 metricbeat/module/haproxy/haproxy.go diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go new file mode 100644 index 00000000000..b7d622bf7ba --- /dev/null +++ b/metricbeat/module/haproxy/haproxy.go @@ -0,0 +1,219 @@ +package haproxy + +import ( + "bytes" + "encoding/csv" + "errors" + "fmt" + "github.com/gocarina/gocsv" + "io" + "net" + "strings" +) + +// Stat is an instance of the HAProxy stat information +type Stat struct { + PxName string `csv:"# pxname"` + SvName string `csv:"svname"` + Qcur string `csv:"qcur"` + Qmax string `csv:"qmax"` + Scur string `csv:"scur"` + Smax string `csv:"smax"` + Slim string `csv:"slim"` + Stot string `csv:"stot"` + Bin string `csv:"bin"` + Bout string `csv:"bout"` + Dreq string `csv:"dreq"` + Dresp string `csv:"dresp"` + Ereq string `csv:"ereq"` + Econ string `csv:"econ"` + Eresp string `csv:"eresp"` + Wretr string `csv:"wretr"` + Wredis string `csv:"wredis"` + Status string `csv:"status"` + Weight string `csv:"weight"` + Act string `csv:"act"` + Bck string `csv:"bck"` + ChkFail string `csv:"chkfail"` + ChkDown string `csv:"chkdown"` + Lastchg string `csv:"lastchg"` + Downtime string `csv:"downtime"` + Qlimit string `csv:"qlimit"` + Pid string `csv:"pid"` + Iid string `csv:"iid"` + Sid string `csv:"sid"` + Throttle string `csv:"throttle"` + Lbtot string `csv:"lbtot"` + Tracked string `csv:"tracked"` + Type string `csv:"type"` + Rate string `csv:"rate"` + RateLim string `csv:"rate_lim"` + RateMax string `csv:"rate_max"` + CheckStatus string `csv:"check_status"` + CheckCode string `csv:"check_code"` + CheckDuration string `csv:"check_duration"` + Hrsp1xx string `csv:"hrsp_1xx"` + Hrsp2xx string `csv:"hrsp_2xx"` + Hrsp3xx string `csv:"hrsp_3xx"` + Hrsp4xx string `csv:"hrsp_4xx"` + Hrsp5xx string `csv:"hrsp_5xx"` + HrspOther string `csv:"hrsp_other"` + Hanafail string `csv:"hanafail"` + ReqRate string `csv:"req_rate"` + ReqRateMax string `csv:"req_rate_max"` + ReqTot string `csv:"req_tot"` + CliAbrt string `csv:"cli_abrt"` + SrvAbrt string `csv:"srv_abrt"` + CompIn string `csv:"comp_in"` + CompOut string `csv:"comp_out"` + CompByp string `csv:"comp_byp"` + CompRsp string `csv:"comp_rsp"` + 
LastSess string `csv:"lastsess"` + LastChk string `csv:"last_chk"` + LastAgt string `csv:"last_agt"` + Qtime string `csv:"qtime"` + Ctime string `csv:"ctime"` + Rtime string `csv:"rtime"` + Ttime string `csv:"ttime"` +} + +/* +type Stat struct { + PxName string `csv:"# pxname"` + SvName string `csv:"svname"` + Qcur uint64 `csv:"qcur"` + Qmax uint64 `csv:"qmax"` + Scur uint64 `csv:"scur"` + Smax uint64 `csv:"smax"` + Slim uint64 `csv:"slim"` + Stot uint64 `csv:"stot"` + Bin uint64 `csv:"bin"` + Bout uint64 `csv:"bout"` + Dreq uint64 `csv:"dreq"` + Dresp uint64 `csv:"dresp"` + Ereq uint64 `csv:"ereq"` + Econ uint64 `csv:"econ"` + Eresp uint64 `csv:"eresp"` + Wretr uint64 `csv:"wretr"` + Wredis uint64 `csv:"wredis"` + Status string `csv:"status"` + Weight uint64 `csv:"weight"` + Act uint64 `csv:"act"` + Bck uint64 `csv:"bck"` + ChkFail uint64 `csv:"chkfail"` + ChkDown uint64 `csv:"chkdown"` + Lastchg uint64 `csv:"lastchg"` + Downtime uint64 `csv:"downtime"` + Qlimit uint64 `csv:"qlimit"` + Pid uint64 `csv:"pid"` + Iid uint64 `csv:"iid"` + Sid uint64 `csv:"sid"` + Throttle uint64 `csv:"throttle"` + Lbtot uint64 `csv:"lbtot"` + Tracked uint64 `csv:"tracked"` + Type uint64 `csv:"type"` + Rate uint64 `csv:"rate"` + RateLim uint64 `csv:"rate_lim"` + RateMax uint64 `csv:"rate_max"` + CheckStatus string `csv:"check_status"` + CheckCode uint64 `csv:"check_code"` + CheckDuration uint64 `csv:"check_duration"` + Hrsp1xx uint64 `csv:"hrsp_1xx"` + Hrsp2xx uint64 `csv:"hrsp_2xx"` + Hrsp3xx uint64 `csv:"hrsp_3xx"` + Hrsp4xx uint64 `csv:"hrsp_4xx"` + Hrsp5xx uint64 `csv:"hrsp_5xx"` + HrspOther uint64 `csv:"hrsp_other"` + Hanafail uint64 `csv:"hanafail"` + ReqRate uint64 `csv:"req_rate"` + ReqRateMax uint64 `csv:"req_rate_max"` + ReqTot uint64 `csv:"req_tot"` + CliAbrt uint64 `csv:"cli_abrt"` + SrvAbrt uint64 `csv:"srv_abrt"` + CompIn uint64 `csv:"comp_in"` + CompOut uint64 `csv:"comp_out"` + CompByp uint64 `csv:"comp_byp"` + CompRsp uint64 `csv:"comp_rsp"` + LastSess int64 `csv:"lastsess"` + LastChk string `csv:"last_chk"` + LastAgt uint64 `csv:"last_agt"` + Qtime uint64 `csv:"qtime"` + Ctime uint64 `csv:"ctime"` + Rtime uint64 `csv:"rtime"` + Ttime uint64 `csv:"ttime"` +} +*/ + +// Client is an instance of the HAProxy client +type Client struct { + connection net.Conn + Address string + ProtoScheme string +} + +// NewHaproxyClient returns a new instance of HaproxyClient +func NewHaproxyClient(address string) (*Client, error) { + parts := strings.Split(address, "://") + if len(parts) != 2 { + return nil, errors.New("Must have protocol scheme and address!") + } + + if parts[0] != "tcp" && parts[0] != "unix" { + return nil, errors.New("Invalid Protocol Scheme!") + } + + return &Client{ + Address: parts[1], + ProtoScheme: parts[0], + }, nil +} + +// Run sends a designated command to the haproxy stats socket +func (c *Client) run(cmd string) (*bytes.Buffer, error) { + var conn net.Conn + response := bytes.NewBuffer(nil) + + conn, err := net.Dial(c.ProtoScheme, c.Address) + if err != nil { + return response, err + } + c.connection = conn + + defer c.connection.Close() + + _, err = c.connection.Write([]byte(cmd + "\n")) + if err != nil { + return response, err + } + + _, err = io.Copy(response, c.connection) + if err != nil { + return response, err + } + + if strings.HasPrefix(response.String(), "Unknown command") { + return response, fmt.Errorf("Unknown command: %s", cmd) + } + + return response, nil +} + +// ShowStat returns the result from the 'show stat' command +func (c *Client) GetStat() (statRes []*Stat, 
err error) { + + runResult, err := c.run("show stat") + if err != nil { + return nil, err + } + + csvReader := csv.NewReader(runResult) + csvReader.TrailingComma = true + + err = gocsv.UnmarshalCSV(csvReader, &statRes) + if err != nil { + return nil, fmt.Errorf("Error parsing CSV: %s", err) + } + + return statRes, nil + +} From dcf16a83ec122930271f5ff7706176d262b312b3 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Tue, 16 Aug 2016 06:41:55 -0400 Subject: [PATCH 04/19] Updated stat/info metrics --- metricbeat/metricbeat.yml | 7 +- .../module/haproxy/info/_meta/data.json | 76 ++++++-- .../module/haproxy/info/_meta/fields.yml | 2 +- metricbeat/module/haproxy/info/info.go | 7 + .../module/haproxy/stat/_meta/data.json | 78 ++++++-- .../module/haproxy/stat/_meta/docs.asciidoc | 130 +++++++++++++ .../module/haproxy/stat/_meta/fields.yml | 175 ++++++++++++++---- metricbeat/module/haproxy/stat/data.go | 138 +++++++------- metricbeat/module/haproxy/stat/stat.go | 96 ++++++---- 9 files changed, 526 insertions(+), 183 deletions(-) diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index f66c95f1061..d8e8c98f55a 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -47,11 +47,12 @@ metricbeat.modules: - module: haproxy metricsets: - "stat" - - "info" + # - "info" enabled: true period: 10s - stats_method: "unix_socket" - stats_path: "/Users/alain.lefebvre/tmp/haproxy.sock" + #stats_method: "unix_socket" + stats_addr: "unix:///Users/alain.lefebvre/tmp/haproxy.sock" + #stats_path: "/Users/alain.lefebvre/tmp/haproxy.sock" diff --git a/metricbeat/module/haproxy/info/_meta/data.json b/metricbeat/module/haproxy/info/_meta/data.json index 5f81cf1613a..c21cbc98f2e 100644 --- a/metricbeat/module/haproxy/info/_meta/data.json +++ b/metricbeat/module/haproxy/info/_meta/data.json @@ -1,19 +1,59 @@ { - "@timestamp":"2016-05-23T08:05:34.853Z", - "beat":{ - "hostname":"beathost", - "name":"beathost" - }, - "metricset":{ - "host":"localhost", - "module":"mysql", - "name":"status", - "rtt":44269 - }, - "haproxy":{ - "info":{ - "example": "info" - } - }, - "type":"metricsets" -} + "@timestamp": "2016-08-13T19:44:21.036Z", + "beat": { + "hostname": "beathost", + "name": "beathost" + }, + "haproxy": { + "info": { + "compress_bps_in": 0, + "compress_bps_out": 0, + "compress_bps_rate_limit": 0, + "conn_rate": 0, + "conn_rate_limit": 0, + "cum_conns": 67, + "cum_req": 67, + "cum_ssl_conns": 0, + "curr_conns": 0, + "curr_ssl_conns": 0, + "hard_max_conn": 4000, + "idle_pct": 100, + "max_conn": 4000, + "max_conn_rate": 5, + "max_pipes": 0, + "max_sess_rate": 5, + "max_sock": 8033, + "max_ssl_conns": 0, + "max_ssl_rate": 0, + "max_zlib_mem_usage": 0, + "mem_max_mb": 0, + "nb_proc": 1, + "pid": 53858, + "pipes_free": 0, + "pipes_used": 0, + "process_num": 1, + "run_queue": 2, + "sess_rate": 0, + "sess_rate_limit": 0, + "ssl_babckend_key_rate": 0, + "ssl_backend_max_key_rate": 0, + "ssl_cache_misses": 0, + "ssl_cached_lookups": 0, + "ssl_frontend_key_rate": 0, + "ssl_frontend_max_key_rate": 0, + "ssl_frontend_session_reuse_pct": 0, + "ssl_rate": 0, + "ssl_rate_limit": 0, + "tasks": 7, + "ulimit_n": 8033, + "uptime_sec": 13700, + "zlib_mem_usage": 0 + } + }, + "metricset": { + "module": "haproxy", + "name": "info", + "rtt": 707 + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml index 85add22af1b..b488684b201 100644 --- a/metricbeat/module/haproxy/info/_meta/fields.yml +++ 
b/metricbeat/module/haproxy/info/_meta/fields.yml
@@ -1,7 +1,7 @@
 - name: info
   type: group
   description: >
-    info
+    General information collected on the HAProxy process
   fields:
     - name: example
       type: keyword
diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go
index 10af7eef19d..73bb01c64db 100644
--- a/metricbeat/module/haproxy/info/info.go
+++ b/metricbeat/module/haproxy/info/info.go
@@ -4,8 +4,11 @@ import (
 	"errors"
 	"fmt"
 	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/metricbeat/mb"
 	"net"
+	//"net/http"
+	//"net/url"
 )
 
 const (
@@ -14,6 +17,10 @@ const (
 	// defaultSocket is the default path to the unix socket tfor stats on haproxy.
 	statsMethod   = "info"
 	defaultSocket = "/var/lib/haproxy/stats"
 )
 
+var (
+	debugf = logp.MakeDebug("haproxy-info")
+)
+
 // init registers the MetricSet with the central registry.
 // The New method will be called after the setup of the module and before starting to fetch data
 func init() {
diff --git a/metricbeat/module/haproxy/stat/_meta/data.json b/metricbeat/module/haproxy/stat/_meta/data.json
index 8659dd508dc..ab75ae23ebd 100644
--- a/metricbeat/module/haproxy/stat/_meta/data.json
+++ b/metricbeat/module/haproxy/stat/_meta/data.json
@@ -1,19 +1,61 @@
 {
-    "@timestamp":"2016-05-23T08:05:34.853Z",
-    "beat":{
-        "hostname":"beathost",
-        "name":"beathost"
-    },
-    "metricset":{
-        "host":"localhost",
-        "module":"mysql",
-        "name":"status",
-        "rtt":44269
-    },
-    "haproxy":{
-        "stat":{
-            "example": "stat"
-        }
-    },
-    "type":"metricsets"
-}
+    "@timestamp": "2016-08-13T19:44:21.036Z",
+    "beat": {
+        "hostname": "beathost",
+        "name": "beathost"
+    },
+    "haproxy": {
+        "stat": {
+            "act": 1,
+            "bck": 0,
+            "bin": 0,
+            "bout": 0,
+            "check_duration": 0,
+            "check_status": "L4CON",
+            "chkdown": 1,
+            "chkfail": 1,
+            "cli_abrt": 0,
+            "ctime": 0,
+            "downtime": 13700,
+            "dresp": 0,
+            "econ": 0,
+            "eresp": 0,
+            "hanafail": 0,
+            "hrsp_1xx": 0,
+            "hrsp_2xx": 0,
+            "hrsp_3xx": 0,
+            "hrsp_4xx": 0,
+            "hrsp_5xx": 0,
+            "hrsp_other": 0,
+            "iid": 3,
+            "last_chk": "Connection refused",
+            "lastchg": 13700,
+            "lastsess": -1,
+            "lbtot": 0,
+            "pid": 1,
+            "qcur": 0,
+            "qmax": 0,
+            "qtime": 0,
+            "rate": 0,
+            "rate_max": 0,
+            "rtime": 0,
+            "scur": 0,
+            "sid": 1,
+            "smax": 0,
+            "srv_abrt": 0,
+            "status": "DOWN",
+            "stot": 0,
+            "svname": "log1",
+            "ttime": 0,
+            "weight": 1,
+            "wredis": 0,
+            "wretr": 0
+        }
+    },
+    "metricset": {
+        "module": "haproxy",
+        "name": "stat",
+        "rtt": 2118
+    },
+    "type": ""
+}
\ No newline at end of file
diff --git a/metricbeat/module/haproxy/stat/_meta/docs.asciidoc b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc
index 6725bc19c53..52c2085e868 100644
--- a/metricbeat/module/haproxy/stat/_meta/docs.asciidoc
+++ b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc
@@ -1,3 +1,133 @@
 === haproxy stat MetricSet
 
 This is the info metricset of the module haproxy.
+
+
+=== Official documentation for fields
+
+A list of the 'stat' fields along with a basic description can be found here:
+
+http://www.haproxy.org/download/1.6/doc/management.txt
+
+The documentation below is an extract from the URL above, specifically from section "9.1. CSV format".
+
+[source]
+-----------------
+In brackets after each field name are the types which may have a value for
+that field. The types are L (Listeners), F (Frontends), B (Backends), and
+S (Servers).
+
+  0. pxname [LFBS]: proxy name
+  1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend,
+     any name for server/listener)
+  2. qcur [..BS]: current queued requests.
For the backend this reports the + number queued without a server assigned. + 3. qmax [..BS]: max value of qcur + 4. scur [LFBS]: current sessions + 5. smax [LFBS]: max sessions + 6. slim [LFBS]: configured session limit + 7. stot [LFBS]: cumulative number of connections + 8. bin [LFBS]: bytes in + 9. bout [LFBS]: bytes out + 10. dreq [LFB.]: requests denied because of security concerns. + - For tcp this is because of a matched tcp-request content rule. + - For http this is because of a matched http-request or tarpit rule. + 11. dresp [LFBS]: responses denied because of security concerns. + - For http this is because of a matched http-request rule, or + "option checkcache". + 12. ereq [LF..]: request errors. Some of the possible causes are: + - early termination from the client, before the request has been sent. + - read error from the client + - client timeout + - client closed connection + - various bad requests from the client. + - request was tarpitted. + 13. econ [..BS]: number of requests that encountered an error trying to + connect to a backend server. The backend stat is the sum of the stat + for all servers of that backend, plus any connection errors not + associated with a particular server (such as the backend having no + active servers). + 14. eresp [..BS]: response errors. srv_abrt will be counted here also. + Some other errors are: + - write error on the client socket (won't be counted for the server stat) + - failure applying filters to the response. + 15. wretr [..BS]: number of times a connection to a server was retried. + 16. wredis [..BS]: number of times a request was redispatched to another + server. The server value counts the number of times that server was + switched away from. + 17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...) + 18. weight [..BS]: total weight (backend), server weight (server) + 19. act [..BS]: number of active servers (backend), server is active (server) + 20. bck [..BS]: number of backup servers (backend), server is backup (server) + 21. chkfail [...S]: number of failed checks. (Only counts checks failed when + the server is up.) + 22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts + transitions to the whole backend being down, rather than the sum of the + counters for each server. + 23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition + 24. downtime [..BS]: total downtime (in seconds). The value for the backend + is the downtime for the whole backend, not the sum of the server downtime. + 25. qlimit [...S]: configured maxqueue for the server, or nothing in the + value is 0 (default, meaning no limit) + 26. pid [LFBS]: process id (0 for first instance, 1 for second, ...) + 27. iid [LFBS]: unique proxy id + 28. sid [L..S]: server id (unique inside a proxy) + 29. throttle [...S]: current throttle percentage for the server, when + slowstart is active, or no value if not in slowstart. + 30. lbtot [..BS]: total number of times a server was selected, either for new + sessions, or when re-dispatching. The server counter is the number + of times that server was selected. + 31. tracked [...S]: id of proxy/server if tracking is enabled. + 32. type [LFBS]: (0=frontend, 1=backend, 2=server, 3=socket/listener) + 33. rate [.FBS]: number of sessions per second over last elapsed second + 34. rate_lim [.F..]: configured limit on new sessions per second + 35. rate_max [.FBS]: max number of new sessions per second + 36. 
check_status [...S]: status of last health check, one of: + UNK -> unknown + INI -> initializing + SOCKERR -> socket error + L4OK -> check passed on layer 4, no upper layers testing enabled + L4TOUT -> layer 1-4 timeout + L4CON -> layer 1-4 connection problem, for example + "Connection refused" (tcp rst) or "No route to host" (icmp) + L6OK -> check passed on layer 6 + L6TOUT -> layer 6 (SSL) timeout + L6RSP -> layer 6 invalid response - protocol error + L7OK -> check passed on layer 7 + L7OKC -> check conditionally passed on layer 7, for example 404 with + disable-on-404 + L7TOUT -> layer 7 (HTTP/SMTP) timeout + L7RSP -> layer 7 invalid response - protocol error + L7STS -> layer 7 response error, for example HTTP 5xx + 37. check_code [...S]: layer5-7 code, if available + 38. check_duration [...S]: time in ms took to finish last health check + 39. hrsp_1xx [.FBS]: http responses with 1xx code + 40. hrsp_2xx [.FBS]: http responses with 2xx code + 41. hrsp_3xx [.FBS]: http responses with 3xx code + 42. hrsp_4xx [.FBS]: http responses with 4xx code + 43. hrsp_5xx [.FBS]: http responses with 5xx code + 44. hrsp_other [.FBS]: http responses with other codes (protocol error) + 45. hanafail [...S]: failed health checks details + 46. req_rate [.F..]: HTTP requests per second over last elapsed second + 47. req_rate_max [.F..]: max number of HTTP requests per second observed + 48. req_tot [.F..]: total number of HTTP requests received + 49. cli_abrt [..BS]: number of data transfers aborted by the client + 50. srv_abrt [..BS]: number of data transfers aborted by the server + (inc. in eresp) + 51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor + 52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor + 53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor + (CPU/BW limit) + 54. comp_rsp [.FB.]: number of HTTP responses that were compressed + 55. lastsess [..BS]: number of seconds since last session assigned to + server/backend + 56. last_chk [...S]: last health check contents or textual error + 57. last_agt [...S]: last agent check contents or textual error + 58. qtime [..BS]: the average queue time in ms over the 1024 last requests + 59. ctime [..BS]: the average connect time in ms over the 1024 last requests + 60. rtime [..BS]: the average response time in ms over the 1024 last requests + (0 for TCP) + 61. ttime [..BS]: the average total session time in ms over the 1024 last + requests + +----------------- \ No newline at end of file diff --git a/metricbeat/module/haproxy/stat/_meta/fields.yml b/metricbeat/module/haproxy/stat/_meta/fields.yml index 947e3612db3..ca43e8f4283 100644 --- a/metricbeat/module/haproxy/stat/_meta/fields.yml +++ b/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -1,261 +1,356 @@ - name: stat type: group description: > - stat + Stats collected from HAProxy process fields: - name: pxname type: string description: > - [DESC] + proxy name - name: svname type: string description: > - [DESC] + service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener) - name: qcur type: integer description: > - Currrent queued requess + current queued requests. 
For the backend this reports the number queued without a server assigned - name: qmax type: integer description: > - Max queued requests + max value of qcur - name: scur type: integer description: > - Current sessions + current sessions - name: smax type: integer description: > - Max sessions + max sessions - name: slim type: integer description: > - Session limit + configured session limit - name: stot type: string description: > - Total sessions + cumulative number of connections - name: bin type: integer description: > - Bytes in + bytes in - name: bout type: integer description: > - Bytes out + bytes out - name: dreq type: integer description: > - Denied Requests + requests denied because of security concerns. + * For tcp this is because of a matched tcp-request content rule. + * For http this is because of a matched http-request or tarpit rule. - name: dresp type: integer description: > - Denied Responses + responses denied because of security concerns. + * For http this is because of a matched http-request rule, or "option checkcache". - name: ereq type: integer description: > - Request Errors - + request errors. Some of the possible causes are: + * early termination from the client, before the request has been sent. + * read error from the client + * client timeout + * client closed connection + * various bad requests from the client. + * request was tarpitted. + - name: econ - type: string + type: integer description: > - + number of requests that encountered an error trying to + connect to a backend server. The backend stat is the sum of the stat + for all servers of that backend, plus any connection errors not + associated with a particular server (such as the backend having no + active servers). + - name: eresp type: integer description: > - + response errors. srv_abrt will be counted here also. + Some other errors are: + * write error on the client socket (won't be counted for the server stat) + * failure applying filters to the response. + - name: wretr type: integer description: > - + number of times a connection to a server was retried. + - name: wredis type: integer description: > - + number of times a request was redispatched to another + server. The server value counts the number of times that server was + switched away from. + - name: status - type: integer - description: > - - - name: weight type: string description: > - + status (UP/DOWN/NOLB/MAINT/MAINT(via)...) + + - name: weight + type: integer + description: > + total weight (backend), server weight (server) + - name: act type: integer description: > - + number of active servers (backend), server is active (server) + - name: bck type: integer description: > - + number of backup servers (backend), server is backup (server) + - name: chkfail type: integer description: > - + number of failed checks. (Only counts checks failed when + the server is up.) + - name: chkdown type: integer description: > + number of UP->DOWN transitions. The backend counter counts + transitions to the whole backend being down, rather than the sum of the + counters for each server. - name: lastchg - type: string + type: integer description: > - + number of seconds since the last UP<->DOWN transition + - name: downtime type: integer description: > - + total downtime (in seconds). The value for the backend + is the downtime for the whole backend, not the sum of the server downtime. 
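Since `show stat` is plain CSV, the column positions documented above can also be read directly from a parsed record. A small sketch with a synthetic, truncated two-line payload (only the first eight columns, plus HAProxy's trailing empty column):

[source,go]
----
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// Hypothetical two-line extract of "show stat" output; the header names
// match the fields documented above (qcur is column 2, qmax column 3, ...).
const raw = `# pxname,svname,qcur,qmax,scur,smax,slim,stot,
http-webservices,log1,0,0,0,0,,0,
`

func main() {
	r := csv.NewReader(strings.NewReader(raw))
	r.FieldsPerRecord = -1 // tolerate the trailing empty column
	rows, err := r.ReadAll()
	if err != nil {
		panic(err)
	}
	// rows[0] is the header, rows[1] the first proxy line.
	fmt.Println(rows[1][0], rows[1][2]) // proxy name and qcur
}
----

In the module itself this mapping is done by gocsv against the `csv:"..."` struct tags rather than by position.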
+ - name: qlimit type: integer description: > - + configured maxqueue for the server, or nothing in the + value is 0 (default, meaning no limit) + - name: pid type: integer description: > - + process id (0 for first instance, 1 for second, ...) + - name: iid type: integer description: > + unique proxy id - name: sid - type: string + type: integer description: > - + server id (unique inside a proxy) + - name: throttle type: integer description: > - + current throttle percentage for the server, when + slowstart is active, or no value if not in slowstart. + - name: lbtot type: integer description: > - + total number of times a server was selected, either for new + sessions, or when re-dispatching. The server counter is the number + of times that server was selected. + - name: tracked type: integer description: > - + id of proxy/server if tracking is enabled. + - name: type type: integer description: > + (0=frontend, 1=backend, 2=server, 3=socket/listener) - name: rate type: integer description: > + number of sessions per second over last elapsed second - name: rate_lim type: integer description: > + configured limit on new sessions per second - name: rate_max type: integer description: > + max number of new sessions per second - name: check_status - type: integer + type: string description: > + status of last health check, one of: + UNK -> unknown + INI -> initializing + SOCKERR -> socket error + L4OK -> check passed on layer 4, no upper layers testing enabled + L4TOUT -> layer 1-4 timeout + L4CON -> layer 1-4 connection problem, for example + "Connection refused" (tcp rst) or "No route to host" (icmp) + L6OK -> check passed on layer 6 + L6TOUT -> layer 6 (SSL) timeout + L6RSP -> layer 6 invalid response - protocol error + L7OK -> check passed on layer 7 + L7OKC -> check conditionally passed on layer 7, for example 404 with + disable-on-404 + L7TOUT -> layer 7 (HTTP/SMTP) timeout + L7RSP -> layer 7 invalid response - protocol error + L7STS -> layer 7 response error, for example HTTP 5xx - name: check_code type: integer description: > + layer5-7 code, if available - name: check_duration type: integer description: > + time in ms took to finish last health check + + - name: hrsp_1xx + type: integer + description: > + http responses with 1xx code - name: hrsp_2xx type: integer description: > + http responses with 2xx code - name: hrsp_3xx type: integer description: > + http responses with 3xx code - name: hrsp_4xx type: integer description: > + http responses with 4xx code - name: hrsp_5xx type: integer description: > + http responses with 5xx code - name: hrsp_other type: integer description: > + http responses with other codes (protocol error) - name: hanafail type: integer description: > + failed health checks details - name: req_rate type: integer description: > + HTTP requests per second over last elapsed second - name: req_rate_max type: integer description: > + max number of HTTP requests per second observed - name: req_tot type: integer description: > + total number of HTTP requests received - name: cli_abrt type: integer description: > + number of data transfers aborted by the client - name: srv_abrt type: integer description: > + number of data transfers aborted by the server (inc. 
in eresp)

   - name: comp_in
     type: integer
     description: >
+      number of HTTP response bytes fed to the compressor

   - name: comp_out
     type: integer
     description: >
+      number of HTTP response bytes emitted by the compressor

   - name: comp_byp
     type: integer
     description: >
+      number of bytes that bypassed the HTTP compressor (CPU/BW limit)

   - name: comp_rsp
     type: integer
     description: >
+      number of HTTP responses that were compressed

   - name: lastsess
     type: integer
     description: >
+      number of seconds since last session assigned to server/backend

   - name: last_chk
-    type: integer
+    type: string
     description: >
+      last health check contents or textual error

   - name: last_agt
     type: integer
     description: >
+      last agent check contents or textual error

   - name: qtime
     type: integer
     description: >
+      the average queue time in ms over the 1024 last requests

   - name: ctime
     type: integer
     description: >
+      the average connect time in ms over the 1024 last requests

   - name: rtime
     type: integer
     description: >
+      the average response time in ms over the 1024 last requests (0 for TCP)

   - name: ttime
     type: integer
     description: >
+      the average total session time in ms over the 1024 last requests
\ No newline at end of file
diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go
index 2d77164f18e..6450b2e1072 100644
--- a/metricbeat/module/haproxy/stat/data.go
+++ b/metricbeat/module/haproxy/stat/data.go
@@ -2,75 +2,77 @@ package stat

 import (
 	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/metricbeat/module/haproxy"
 	s "github.com/elastic/beats/metricbeat/schema"
 	c "github.com/elastic/beats/metricbeat/schema/mapstrstr"
+	"reflect"
 	"strings"
 )

 var (
 	schema = s.Schema{
-		"pxname":         c.Str("pxname"),
-		"svname":         c.Str("svname"),
-		"qcur":           c.Int("qcur"),
-		"qmax":           c.Int("qmax"),
-		"scur":           c.Int("scur"),
-		"smax":           c.Int("smax"),
-		"slim":           c.Int("slim"),
-		"stot":           c.Int("stot"),
-		"bin":            c.Int("bin"),
-		"bout":           c.Int("bout"),
-		"dreq":           c.Int("dreq"),
-		"dresp":          c.Int("dresp"),
-		"ereq":           c.Int("ereq"),
-		"econ":           c.Int("econ"),
-		"eresp":          c.Int("eresp"),
-		"wretr":          c.Int("wretr"),
-		"wredis":         c.Int("wredis"),
-		"status":         c.Str("status"),
-		"weight":         c.Int("weight"),
-		"act":            c.Int("act"),
-		"bck":            c.Int("bck"),
-		"chkfail":        c.Int("chkfail"),
-		"chkdown":        c.Int("chkdown"),
-		"lastchg":        c.Int("lastchg"),
-		"downtime":       c.Int("downtime"),
-		"qlimit":         c.Int("qlimit"),
-		"pid":            c.Int("pid"),
-		"iid":            c.Int("iid"),
-		"sid":            c.Int("sid"),
-		"throttle":       c.Int("throttle"),
-		"lbtot":          c.Int("lbtot"),
-		"tracked":        c.Int("tracked"),
-		"type":           c.Int("type"),
-		"rate":           c.Int("rate"),
-		"rate_lim":       c.Int("rate_lim"),
-		"rate_max":       c.Int("rate_max"),
-		"check_status":   c.Str("check_status"),
-		"check_code":     c.Int("check_code"),
-		"check_duration": c.Int("check_duration"),
-		"hrsp_1xx":       c.Int("hrsp_1xx"),
-		"hrsp_2xx":       c.Int("hrsp_2xx"),
-		"hrsp_3xx":       c.Int("hrsp_3xx"),
-		"hrsp_4xx":       c.Int("hrsp_4xx"),
-		"hrsp_5xx":       c.Int("hrsp_5xx"),
-		"hrsp_other":     c.Int("hrsp_other"),
-		"hanafail":       c.Int("hanafail"),
-		"req_rate":       c.Int("req_rate"),
-		"req_rate_max":   c.Int("req_rate_max"),
-		"req_tot":        c.Int("req_tot"),
-		"cli_abrt":       c.Int("cli_abrt"),
-		"srv_abrt":       c.Int("srv_abrt"),
-		"comp_in":        c.Int("comp_in"),
-		"comp_out":       c.Int("comp_out"),
-		"comp_byp":       c.Int("comp_byp"),
-		"comp_rsp":       c.Int("comp_rsp"),
-		"lastsess":       c.Int("lastsess"),
-		"last_chk":       c.Str("last_chk"),
-		"last_agt":       c.Int("last_agt"),
-		"qtime":          c.Int("qtime"),
-		"ctime":          c.Int("ctime"),
-		"rtime":          c.Int("rtime"),
-		"ttime":          c.Int("ttime"),
+		"pxname":         c.Str("PxName"),
+		"svname":         c.Str("SvName"),
+		"qcur":           c.Int("Qcur"),
+		"qmax":           c.Int("Qmax"),
+		"scur":           c.Int("Scur"),
+		"smax":           c.Int("Smax"),
+		"slim":           c.Int("Slim"),
+		"stot":           c.Int("Stot"),
+		"bin":            c.Int("Bin"),
+		"bout":           c.Int("Bout"),
+		"dreq":           c.Int("Dreq"),
+		"dresp":          c.Int("Dresp"),
+		"ereq":           c.Int("Ereq"),
+		"econ":           c.Int("Econ"),
+		"eresp":          c.Int("Eresp"),
+		"wretr":          c.Int("Wretr"),
+		"wredis":         c.Int("Wredis"),
+		"status":         c.Str("Status"),
+		"weight":         c.Int("Weight"),
+		"act":            c.Int("Act"),
+		"bck":            c.Int("Bck"),
+		"chkfail":        c.Int("Chkfail"),
+		"chkdown":        c.Int("Chkdown"),
+		"lastchg":        c.Int("Lastchg"),
+		"downtime":       c.Int("Downtime"),
+		"qlimit":         c.Int("Qlimit"),
+		"pid":            c.Int("Pid"),
+		"iid":            c.Int("Iid"),
+		"sid":            c.Int("Sid"),
+		"throttle":       c.Int("Throttle"),
+		"lbtot":          c.Int("Lbtot"),
+		"tracked":        c.Int("Tracked"),
+		"type":           c.Int("Type"),
+		"rate":           c.Int("Rate"),
+		"rate_lim":       c.Int("RateLim"),
+		"rate_max":       c.Int("RateMax"),
+		"check_status":   c.Str("CheckStatus"),
+		"check_code":     c.Int("CheckCode"),
+		"check_duration": c.Int("CheckDuration"),
+		"hrsp_1xx":       c.Int("Hrsp1xx"),
+		"hrsp_2xx":       c.Int("Hrsp2xx"),
+		"hrsp_3xx":       c.Int("Hrsp3xx"),
+		"hrsp_4xx":       c.Int("Hrsp4xx"),
+		"hrsp_5xx":       c.Int("Hrsp5xx"),
+		"hrsp_other":     c.Int("HrspOther"),
+		"hanafail":       c.Int("Hanafail"),
+		"req_rate":       c.Int("ReqRate"),
+		"req_rate_max":   c.Int("ReqRateMax"),
+		"req_tot":        c.Int("ReqTot"),
+		"cli_abrt":       c.Int("CliAbrt"),
+		"srv_abrt":       c.Int("SrvAbrt"),
+		"comp_in":        c.Int("CompIn"),
+		"comp_out":       c.Int("CompOut"),
+		"comp_byp":       c.Int("CompByp"),
+		"comp_rsp":       c.Int("CompRsp"),
+		"lastsess":       c.Int("Lastsess"),
+		"last_chk":       c.Str("LastChk"),
+		"last_agt":       c.Int("LastAgt"),
+		"qtime":          c.Int("Qtime"),
+		"ctime":          c.Int("Ctime"),
+		"rtime":          c.Int("Rtime"),
+		"ttime":          c.Int("Ttime"),
 	}
 )

@@ -114,16 +116,20 @@ func parseResponse(data []byte) []map[string]string {
 }

 // Map data to MapStr
-func eventMapping(info []map[string]string) []common.MapStr {
+func eventMapping(info []*haproxy.Stat) []common.MapStr {
 	var events []common.MapStr

 	for _, evt := range info {
-		source = map[string]interface{}{}
-		for key, val := range evt {
-			source[key] = val
+		st := reflect.ValueOf(evt).Elem()
+		typeOfT := st.Type()
+		source := map[string]interface{}{}
+
+		for i := 0; i < st.NumField(); i++ {
+			f := st.Field(i)
+			source[typeOfT.Field(i).Name] = f.Interface()
 		}
 		events = append(events, schema.Apply(source))
 	}
diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go
index 2ad65618c90..ef30cfc2110 100644
--- a/metricbeat/module/haproxy/stat/stat.go
+++ b/metricbeat/module/haproxy/stat/stat.go
@@ -1,17 +1,19 @@
 package stat

 import (
-	"errors"
+	//"errors"
 	"fmt"
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/metricbeat/mb"
-	"net"
+	"github.com/elastic/beats/metricbeat/module/haproxy"
+	//"net"
 )

 const (
 	// defaultSocket is the default path to the unix socket for stats on haproxy.
-	statsMethod   = "stat"
-	defaultSocket = "/var/lib/haproxy/stats"
+	statsMethod = "stat"
+	defaultAddr = "unix:///var/lib/haproxy/stats"
+	//defaultHttpPath = "http://localhost:8000/haproxy?stats;csv"
 )

 // init registers the MetricSet with the central registry.
@@ -28,9 +30,10 @@
 // multiple fetch calls.
type MetricSet struct { mb.BaseMetricSet - statsMethod string - statsPath string - counter int + //statsMethod string + //statsPath string + statsAddr string + counter int } // New create a new instance of the MetricSet @@ -39,11 +42,13 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := struct { - StatsMethod string `config:"stats_method"` - StatsPath string `config:"stats_path"` + //StatsMethod string `config:"stats_method"` + //StatsPath string `config:"stats_path"` + StatsAddr string `config:"stats_addr"` }{ - StatsMethod: "unix_socket", - StatsPath: defaultSocket, + //StatsMethod: "unix_socket", + //StatsPath: defaultSocket, + StatsAddr: defaultAddr, } if err := base.Module().UnpackConfig(&config); err != nil { @@ -52,9 +57,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, - statsMethod: config.StatsMethod, - statsPath: config.StatsPath, - counter: 1, + //statsMethod: config.StatsMethod, + //statsPath: config.StatsPath, + statsAddr: config.StatsAddr, + counter: 1, }, nil } @@ -65,38 +71,54 @@ func (m *MetricSet) Fetch() ([]common.MapStr, error) { //var metricSetSlice []common.MapStr - if m.statsMethod == "unix_socket" { + /* + if m.statsMethod == "unix_socket" { - m.counter++ + m.counter++ - c, err := net.Dial("unix", m.statsPath) - if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) - } - defer c.Close() - - // Write the command to the socket - _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) - if err != nil { - return nil, fmt.Errorf("Socket write error: %s", err) - } + c, err := net.Dial("unix", m.statsPath) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) + } + defer c.Close() - // Now read from the socket - buf := make([]byte, 2048) - for { - _, err := c.Read(buf[:]) + // Write the command to the socket + _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) if err != nil { - return nil, err + return nil, fmt.Errorf("Socket write error: %s", err) + } + + // Now read from the socket + buf := make([]byte, 2048) + for { + _, err := c.Read(buf[:]) + if err != nil { + return nil, err + } + return eventMapping(parseResponse(buf)), nil } - return eventMapping(parseResponse(buf)), nil + + } else { + // Get the data from the HTTP URI + m.counter++ + } - } else { - // Get the data from the HTTP URI - m.counter++ + return nil, errors.New("Error getting HAProxy stat") + */ + + hapc, err := haproxy.NewHaproxyClient(m.statsAddr) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error: %s", err)) + } + + res, err := hapc.GetStat() + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error fetching %s: %s", statsMethod, err)) } + m.counter++ - return nil, errors.New("Error getting HAProxy stat") + return eventMapping(res), nil } From 16dafbea5dce8ed39a89fcadf6e6f719cf03aaa8 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Sun, 21 Aug 2016 08:32:33 -0400 Subject: [PATCH 05/19] Comitting current changes --- metricbeat/metricbeat.yml | 6 +- metricbeat/module/haproxy/haproxy.go | 157 ++++++++++++++----------- metricbeat/module/haproxy/info/data.go | 17 ++- metricbeat/module/haproxy/info/info.go | 60 +++------- metricbeat/module/haproxy/stat/stat.go | 56 ++------- 5 files changed, 131 insertions(+), 165 deletions(-) diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index d8e8c98f55a..ac215c28bed 100644 --- a/metricbeat/metricbeat.yml +++ 
b/metricbeat/metricbeat.yml @@ -47,12 +47,10 @@ metricbeat.modules: - module: haproxy metricsets: - "stat" - # - "info" + - "info" enabled: true period: 10s - #stats_method: "unix_socket" - stats_addr: "unix:///Users/alain.lefebvre/tmp/haproxy.sock" - #stats_path: "/Users/alain.lefebvre/tmp/haproxy.sock" + stats_addr: "unix:///tmp/haproxy-stats.sock" diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go index b7d622bf7ba..62e68598cb6 100644 --- a/metricbeat/module/haproxy/haproxy.go +++ b/metricbeat/module/haproxy/haproxy.go @@ -6,7 +6,9 @@ import ( "errors" "fmt" "github.com/gocarina/gocsv" + "github.com/mitchellh/mapstructure" "io" + "io/ioutil" "net" "strings" ) @@ -77,72 +79,56 @@ type Stat struct { Ttime string `csv:"ttime"` } -/* -type Stat struct { - PxName string `csv:"# pxname"` - SvName string `csv:"svname"` - Qcur uint64 `csv:"qcur"` - Qmax uint64 `csv:"qmax"` - Scur uint64 `csv:"scur"` - Smax uint64 `csv:"smax"` - Slim uint64 `csv:"slim"` - Stot uint64 `csv:"stot"` - Bin uint64 `csv:"bin"` - Bout uint64 `csv:"bout"` - Dreq uint64 `csv:"dreq"` - Dresp uint64 `csv:"dresp"` - Ereq uint64 `csv:"ereq"` - Econ uint64 `csv:"econ"` - Eresp uint64 `csv:"eresp"` - Wretr uint64 `csv:"wretr"` - Wredis uint64 `csv:"wredis"` - Status string `csv:"status"` - Weight uint64 `csv:"weight"` - Act uint64 `csv:"act"` - Bck uint64 `csv:"bck"` - ChkFail uint64 `csv:"chkfail"` - ChkDown uint64 `csv:"chkdown"` - Lastchg uint64 `csv:"lastchg"` - Downtime uint64 `csv:"downtime"` - Qlimit uint64 `csv:"qlimit"` - Pid uint64 `csv:"pid"` - Iid uint64 `csv:"iid"` - Sid uint64 `csv:"sid"` - Throttle uint64 `csv:"throttle"` - Lbtot uint64 `csv:"lbtot"` - Tracked uint64 `csv:"tracked"` - Type uint64 `csv:"type"` - Rate uint64 `csv:"rate"` - RateLim uint64 `csv:"rate_lim"` - RateMax uint64 `csv:"rate_max"` - CheckStatus string `csv:"check_status"` - CheckCode uint64 `csv:"check_code"` - CheckDuration uint64 `csv:"check_duration"` - Hrsp1xx uint64 `csv:"hrsp_1xx"` - Hrsp2xx uint64 `csv:"hrsp_2xx"` - Hrsp3xx uint64 `csv:"hrsp_3xx"` - Hrsp4xx uint64 `csv:"hrsp_4xx"` - Hrsp5xx uint64 `csv:"hrsp_5xx"` - HrspOther uint64 `csv:"hrsp_other"` - Hanafail uint64 `csv:"hanafail"` - ReqRate uint64 `csv:"req_rate"` - ReqRateMax uint64 `csv:"req_rate_max"` - ReqTot uint64 `csv:"req_tot"` - CliAbrt uint64 `csv:"cli_abrt"` - SrvAbrt uint64 `csv:"srv_abrt"` - CompIn uint64 `csv:"comp_in"` - CompOut uint64 `csv:"comp_out"` - CompByp uint64 `csv:"comp_byp"` - CompRsp uint64 `csv:"comp_rsp"` - LastSess int64 `csv:"lastsess"` - LastChk string `csv:"last_chk"` - LastAgt uint64 `csv:"last_agt"` - Qtime uint64 `csv:"qtime"` - Ctime uint64 `csv:"ctime"` - Rtime uint64 `csv:"rtime"` - Ttime uint64 `csv:"ttime"` +type Info struct { + Name string + Version string + ReleaseDate string + Nbproc string + ProcessNum string + Pid string + Uptime string + UptimeSec string + MemMaxMB string + UlimitN string + Maxsock string + Maxconn string + HardMaxconn string + CurrConns string + CumConns string + CumReq string + MaxSslConns string + CurrSslConns string + CumSslConns string + Maxpipes string + PipesUsed string + PipesFree string + ConnRate string + ConnRateLimit string + MaxConnRate string + SessRate string + SessRateLimit string + MaxSessRate string + SslRate string + SslRateLimit string + MaxSslRate string + SslFrontendKeyRate string + SslFrontendMaxKeyRate string + SslFrontendSessionReusePct string + SslBackendKeyRate string + SslBackendMaxKeyRate string + SslCacheLookups string + SslCacheMisses string + 
CompressBpsIn              string
+	CompressBpsOut             string
+	CompressBpsRateLim         string
+	ZlibMemUsage               string
+	MaxZlibMemUsage            string
+	Tasks                      string
+	RunQueue                   string
+	IdlePct                    string
+	Node                       string
+	Description                string
 }
-*/

 // Client is an instance of the HAProxy client
 type Client struct {
@@ -198,7 +184,7 @@ func (c *Client) run(cmd string) (*bytes.Buffer, error) {
 	return response, nil
 }

-// ShowStat returns the result from the 'show stat' command
+// GetStat returns the result from the 'show stat' command
 func (c *Client) GetStat() (statRes []*Stat, err error) {

 	runResult, err := c.run("show stat")
@@ -217,3 +203,42 @@ func (c *Client) GetStat() (statRes []*Stat, err error) {

 	return statRes, nil
 }
+
+// GetInfo returns the result from the 'show info' command
+func (c *Client) GetInfo() (infoRes *Info, err error) {
+
+	res, err := c.run("show info")
+	if err != nil {
+		return nil, err
+	}
+
+	b, err := ioutil.ReadAll(res)
+	if err != nil {
+		return nil, err
+	}
+
+	resultMap := map[string]interface{}{}
+
+	for _, ln := range strings.Split(string(b), "\n") {
+
+		ln := strings.Trim(ln, " ")
+		if ln == "" {
+			continue
+		}
+
+		parts := strings.Split(ln, ":")
+		if len(parts) != 2 {
+			continue
+		}
+
+		resultMap[parts[0]] = strings.Trim(parts[1], " ")
+	}
+
+	var result *Info
+	if err := mapstructure.Decode(resultMap, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go
index aaa3c79a523..3df90664b04 100644
--- a/metricbeat/module/haproxy/info/data.go
+++ b/metricbeat/module/haproxy/info/data.go
@@ -2,8 +2,10 @@ package info

 import (
 	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/metricbeat/module/haproxy"
 	s "github.com/elastic/beats/metricbeat/schema"
 	c "github.com/elastic/beats/metricbeat/schema/mapstrstr"
+	"reflect"
 	"strings"
 )

@@ -80,11 +82,20 @@ func parseResponse(data []byte) map[string]string {
 }

 // Map data to MapStr
-func eventMapping(info map[string]string) common.MapStr {
+func eventMapping(info *haproxy.Info) common.MapStr {
 	// Full mapping from info
+
 	source := map[string]interface{}{}
-	for key, val := range info {
-		source[key] = val
+
+	st := reflect.ValueOf(info).Elem()
+	typeOfT := st.Type()
+
+	for i := 0; i < st.NumField(); i++ {
+		f := st.Field(i)
+		source[typeOfT.Field(i).Name] = f.Interface()
 	}
+
 	return schema.Apply(source)
 }
diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go
index 73bb01c64db..cc6b69fbb11 100644
--- a/metricbeat/module/haproxy/info/info.go
+++ b/metricbeat/module/haproxy/info/info.go
@@ -1,20 +1,17 @@
 package info

 import (
-	"errors"
 	"fmt"
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/metricbeat/mb"
-	"net"
-	//"net/http"
-	//"net/url"
+	"github.com/elastic/beats/metricbeat/module/haproxy"
 )

 const (
 	// defaultSocket is the default path to the unix socket for stats on haproxy.
-	statsMethod   = "info"
-	defaultSocket = "/var/lib/haproxy/stats"
+	statsMethod = "info"
+	defaultAddr = "unix:///var/lib/haproxy/stats"
 )

 var (
@@ -35,9 +32,8 @@ func init() {
 // multiple fetch calls.
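GetInfo feeds the parsed key/value map to mapstructure, which is vendored later in this series. A detached sketch of that decode step (trimmed struct; the explicit tag here is illustrative — by default mapstructure matches keys to field names case-insensitively, so keys with underscores such as Uptime_sec need either a tag or an identically spelled field name):

[source,go]
----
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Trimmed stand-in for the Info struct.
type info struct {
	Name      string
	UptimeSec string `mapstructure:"Uptime_sec"`
}

func main() {
	resultMap := map[string]interface{}{
		"Name":       "HAProxy",
		"Uptime_sec": "13700",
	}

	// Decoding into a **struct allocates the target, as in GetInfo.
	var result *info
	if err := mapstructure.Decode(resultMap, &result); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", result) // &{Name:HAProxy UptimeSec:13700}
}
----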
type MetricSet struct { mb.BaseMetricSet - statsMethod string - statsPath string - counter int + statsAddr string + counter int } // New create a new instance of the MetricSet @@ -46,11 +42,9 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := struct { - StatsMethod string `config:"stats_method"` - StatsPath string `config:"stats_path"` + StatsAddr string `config:"stats_addr"` }{ - StatsMethod: "unix_socket", - StatsPath: defaultSocket, + StatsAddr: defaultAddr, } if err := base.Module().UnpackConfig(&config); err != nil { @@ -59,8 +53,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, - statsMethod: config.StatsMethod, - statsPath: config.StatsPath, + statsAddr: config.StatsAddr, counter: 1, }, nil } @@ -70,37 +63,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // descriptive error must be returned. func (m *MetricSet) Fetch() (common.MapStr, error) { - if m.statsMethod == "unix_socket" { - - m.counter++ - c, err := net.Dial("unix", m.statsPath) - if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) - } - defer c.Close() - - // Write the command to the socket - _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) - if err != nil { - return nil, fmt.Errorf("Socket write error: %s", err) - } - - // Now read from the socket - buf := make([]byte, 2048) - for { - _, err := c.Read(buf[:]) - if err != nil { - return nil, err - } - return eventMapping(parseResponse(buf)), nil - } + hapc, err := haproxy.NewHaproxyClient(m.statsAddr) + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error: %s", err)) + } - } else { - // Get the data from the HTTP URI - m.counter++ + res, err := hapc.GetInfo() + if err != nil { + return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error fetching %s: %s", statsMethod, err)) } + m.counter++ - return nil, errors.New("Error getting HAProxy info") + return eventMapping(res), nil } diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go index ef30cfc2110..cdb21f0ccd7 100644 --- a/metricbeat/module/haproxy/stat/stat.go +++ b/metricbeat/module/haproxy/stat/stat.go @@ -4,6 +4,7 @@ import ( //"errors" "fmt" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/haproxy" //"net" @@ -13,7 +14,10 @@ const ( // defaultSocket is the default path to the unix socket tfor stats on haproxy. statsMethod = "stat" defaultAddr = "unix:///var/lib/haproxy/stats" - //defaultHttpPath = "http://localhost:8000/haproxy?stats;csv" +) + +var ( + debugf = logp.MakeDebug("haproxy-stat") ) // init registers the MetricSet with the central registry. @@ -30,8 +34,6 @@ func init() { // multiple fetch calls. 
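NewHaproxyClient itself is defined in haproxy.go rather than in this file, and its body is not part of this hunk. Judging from the stats_addr values used in the configs (unix:// here, tcp:// in a later patch), it dispatches on a URL-style scheme; a rough sketch under that assumption (the function name dialStats is hypothetical):

[source,go]
----
package main

import (
	"fmt"
	"net"
	"strings"
)

// dialStats picks the network for net.Dial from a URL-style address,
// mirroring the unix:// and tcp:// values accepted by stats_addr.
func dialStats(addr string) (net.Conn, error) {
	parts := strings.SplitN(addr, "://", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid stats_addr: %s", addr)
	}
	switch parts[0] {
	case "unix", "tcp":
		return net.Dial(parts[0], parts[1])
	default:
		return nil, fmt.Errorf("unsupported scheme: %s", parts[0])
	}
}

func main() {
	conn, err := dialStats("unix:///var/lib/haproxy/stats")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
	// Same command the socket-based code path issues.
	fmt.Fprint(conn, "show stat\n")
}
----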
type MetricSet struct { mb.BaseMetricSet - //statsMethod string - //statsPath string statsAddr string counter int } @@ -42,12 +44,8 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := struct { - //StatsMethod string `config:"stats_method"` - //StatsPath string `config:"stats_path"` StatsAddr string `config:"stats_addr"` }{ - //StatsMethod: "unix_socket", - //StatsPath: defaultSocket, StatsAddr: defaultAddr, } @@ -57,10 +55,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, - //statsMethod: config.StatsMethod, - //statsPath: config.StatsPath, - statsAddr: config.StatsAddr, - counter: 1, + statsAddr: config.StatsAddr, + counter: 1, }, nil } @@ -69,44 +65,6 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // descriptive error must be returned. func (m *MetricSet) Fetch() ([]common.MapStr, error) { - //var metricSetSlice []common.MapStr - - /* - if m.statsMethod == "unix_socket" { - - m.counter++ - - c, err := net.Dial("unix", m.statsPath) - if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("HAProxy %s error: %s", statsMethod, err)) - } - defer c.Close() - - // Write the command to the socket - _, err = c.Write([]byte(fmt.Sprintf("show %s\n", statsMethod))) - if err != nil { - return nil, fmt.Errorf("Socket write error: %s", err) - } - - // Now read from the socket - buf := make([]byte, 2048) - for { - _, err := c.Read(buf[:]) - if err != nil { - return nil, err - } - return eventMapping(parseResponse(buf)), nil - } - - } else { - // Get the data from the HTTP URI - m.counter++ - - } - - return nil, errors.New("Error getting HAProxy stat") - */ - hapc, err := haproxy.NewHaproxyClient(m.statsAddr) if err != nil { return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error: %s", err)) From ac6dbe174321702e79d96687b15fdb92accc96d6 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Sun, 21 Aug 2016 10:24:07 -0400 Subject: [PATCH 06/19] Committing current changes --- metricbeat/docker-compose.yml | 3 + metricbeat/docker-entrypoint.sh | 36 +++- .../module/haproxy/_meta/config.full.yml | 11 + metricbeat/module/haproxy/_meta/haproxy.conf | 39 ++++ .../module/haproxy/info/_meta/fields.yml | 191 +++++++++++++++++- 5 files changed, 270 insertions(+), 10 deletions(-) create mode 100644 metricbeat/module/haproxy/_meta/config.full.yml create mode 100644 metricbeat/module/haproxy/_meta/haproxy.conf diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml index 16f77ad1b34..0fd3d560f47 100644 --- a/metricbeat/docker-compose.yml +++ b/metricbeat/docker-compose.yml @@ -48,6 +48,9 @@ mysql: nginx: build: ${PWD}/module/nginx/_meta +haproxy: + build: ${PWD}/module/haproxy/_meta + redis: image: redis:3.2.0 diff --git a/metricbeat/docker-entrypoint.sh b/metricbeat/docker-entrypoint.sh index 198afbd6f5c..0dd168c7143 100755 --- a/metricbeat/docker-entrypoint.sh +++ b/metricbeat/docker-entrypoint.sh @@ -3,12 +3,31 @@ set -e # Wait for. Params: host, port, service waitFor() { - echo -n "Waiting for ${3}(${1}:${2}) to start." + + if [ $# == "3" ]; then + SERVICE=$3 + ADDRESS="${1}:${2}" + else + SERVICE=$2 + ADDRESS="${1}" + fi + + echo -n "Waiting for ${SERVICE}(${ADDRESS}) to start." + for ((i=1; i<=90; i++)) do - if nc -vz ${1} ${2} 2>/dev/null; then - echo - echo "${3} is ready!" - return 0 + + if [ $# == "3" ]; then + if nc -vz ${1} ${2} 2>/dev/null; then + echo + echo "${SERVICE} is ready!" + return 0 + fi + else + if nc -Uvz ${1} 2>/dev/null; then + echo + echo "${SERVICE} is ready!" 
+                return 0
+            fi
         fi

         ((i++))
@@ -17,8 +36,10 @@ waitFor() {
     done

     echo
-    echo >&2 "${3} is not available"
-    echo >&2 "Address: ${1}:${2}"
+    echo >&2 "${SERVICE} is not available"
+    echo >&2 "Address: ${ADDRESS} "
+
+    nc -U /tmp/haproxy-stats.sock
 }

 # Main
@@ -27,4 +48,5 @@ waitFor ${MYSQL_HOST} ${MYSQL_PORT} MySQL
 waitFor ${NGINX_HOST} ${NGINX_PORT} Nginx
 waitFor ${REDIS_HOST} ${REDIS_PORT} Redis
 waitFor ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} Zookeeper
+waitFor ${HAPROXY_STATS_SOCKET} HAProxy
 exec "$@"
diff --git a/metricbeat/module/haproxy/_meta/config.full.yml b/metricbeat/module/haproxy/_meta/config.full.yml
new file mode 100644
index 00000000000..190ff50f833
--- /dev/null
+++ b/metricbeat/module/haproxy/_meta/config.full.yml
@@ -0,0 +1,11 @@
+#- module: haproxy
+
+  #metricsets:
+    #- "stat"
+    #- "info"
+
+  #enabled: true
+
+  #period: 10s
+
+  #stats_addr: "unix:///tmp/haproxy-stats.sock"
diff --git a/metricbeat/module/haproxy/_meta/haproxy.conf b/metricbeat/module/haproxy/_meta/haproxy.conf
new file mode 100644
index 00000000000..e4c984ff3af
--- /dev/null
+++ b/metricbeat/module/haproxy/_meta/haproxy.conf
@@ -0,0 +1,39 @@
+
+global
+    # maximum number of simultaneous active connections
+    maxconn 4000
+    #daemon
+    user root
+    group staff
+
+    # for restarts
+    pidfile /var/run/haproxy.pid
+    # Logging to syslog facility local0
+    log 127.0.0.1 local0
+    stats socket /var/run/haproxy.sock mode 700
+    spread-checks 5
+    #debug
+
+defaults
+
+    log global
+    mode http
+    balance roundrobin
+    maxconn 25000
+    option httplog
+    option abortonclose
+    option httpclose
+    option forwardfor
+    retries 3
+    option redispatch
+
+    timeout client 30s
+    timeout connect 30s
+    timeout server 30s
+    option httpchk HEAD /haproxy?monitor HTTP/1.0
+    timeout check 5s
+
+listen http-webservices
+
+    bind 0.0.0.0:8888
+    server log1 127.0.0.1:8889 check
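The haproxy.conf above exposes the stats interface on /var/run/haproxy.sock. A quick way to exercise it outside of Metricbeat is to open the socket and issue the same commands the module uses (socket path assumed from the config above; adjust to your environment):

[source,go]
----
package main

import (
	"bufio"
	"fmt"
	"net"
)

func main() {
	// Connect to the stats socket declared with "stats socket ... mode 700".
	conn, err := net.Dial("unix", "/var/run/haproxy.sock")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	fmt.Fprint(conn, "show info\n")

	// HAProxy answers with one "Key: value" pair per line.
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
----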
diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml
index b488684b201..031c2adc8e7 100644
--- a/metricbeat/module/haproxy/info/_meta/fields.yml
+++ b/metricbeat/module/haproxy/info/_meta/fields.yml
@@ -3,7 +3,192 @@
   description: >
     General information collected on the HAProxy process
   fields:
-    - name: example
-      type: keyword
-      description: >
-        Example field
+    - name: nb_proc
+      type: integer
+      description: >
+        Number of processes
+
+    - name: process_num
+      type: integer
+      description: >
+        Process number
+
+    - name: pid
+      type: integer
+      description: >
+        Process ID
+
+    - name: uptime_sec
+      type: integer
+      description: >
+        Current uptime in seconds
+
+    - name: mem_max_mb
+      type: integer
+      description: >
+        Maximum memory usage in MB
+
+    - name: ulimit_n
+      type: integer
+      description: >
+        Max number of open files for process
+
+    - name: max_sock
+      type: integer
+      description: >
+        Maximum number of sockets
+
+    - name: max_conn
+      type: integer
+      description: >
+        Maximum number of concurrent connections
+
+    - name: hard_max_conn
+      type: integer
+      description: >
+        Hard limit on the number of concurrent connections
+
+    - name: curr_conns
+      type: integer
+      description: >
+        Number of current connections
+
+    - name: cum_conns
+      type: integer
+      description: >
+        Cumulative number of connections
+
+    - name: cum_req
+      type: integer
+      description: >
+        Cumulative number of requests
+
+    - name: max_ssl_conns
+      type: integer
+      description: >
+        Maximum number of SSL connections
+
+    - name: curr_ssl_conns
+      type: integer
+      description: >
+        Number of current SSL connections
+
+    - name: cum_ssl_conns
+      type: integer
+      description: >
+        Cumulative number of SSL connections
+
+    - name: max_pipes
+      type: integer
+      description: >
+        Maximum number of pipes
+
+    - name: pipes_used
+      type: integer
+      description: >
+        Number of pipes currently in use
+
+    - name: pipes_free
+      type: integer
+      description: >
+        Number of free pipes
+
+    - name: conn_rate
+      type: integer
+      description: >
+        Number of connections per second
+
+    - name: conn_rate_limit
+      type: integer
+      description: >
+        Configured limit on new connections per second
+
+    - name: max_conn_rate
+      type: integer
+      description: >
+        Maximum observed number of connections per second
+
+    - name: sess_rate
+      type: integer
+      description: >
+        Number of sessions per second
+
+    - name: sess_rate_limit
+      type: integer
+      description: >
+        Configured limit on new sessions per second
+
+    - name: max_sess_rate
+      type: integer
+      description: >
+        Maximum observed number of sessions per second
+
+    - name: ssl_rate
+      type: integer
+      description: >
+        Number of SSL sessions per second
+
+    - name: ssl_rate_limit
+      type: integer
+      description: >
+        Configured limit on new SSL sessions per second
+
+    - name: max_ssl_rate
+      type: integer
+      description: >
+        Maximum observed number of SSL sessions per second
+
+    - name: ssl_frontend_key_rate
+      type: integer
+      description: >
+        SSL frontend key rate
+
+    - name: ssl_frontend_max_key_rate
+      type: integer
+      description: >
+        Maximum observed SSL frontend key rate
+
+    - name: ssl_frontend_session_reuse_pct
+      type: integer
+      description: >
+        Percentage of SSL frontend sessions reused
+
+    - name: ssl_backend_key_rate
+      type: integer
+      description: >
+        SSL backend key rate
+
+    - name: ssl_backend_max_key_rate
+      type: integer
+      description: >
+        Maximum observed SSL backend key rate
+
+    - name: ssl_cached_lookups
+      type: integer
+      description: >
+        Number of SSL session cache lookups
+
+    - name: ssl_cache_misses
+      type: integer
+      description: >
+        Number of SSL session cache misses
+
+    - name: compress_bps_in
+      type: integer
+      description: >
+        Number of bytes per second fed to the HTTP compressor
+
+    - name: compress_bps_out
+      type: integer
+      description: >
+        Number of bytes per second emitted by the HTTP compressor
+
+    - name: compress_bps_rate_limit
+      type: integer
+      description: >
+        Configured limit on the compressor throughput in bytes per second
+
+    - name: zlib_mem_usage
+      type: integer
+      description: >
+        Current zlib memory usage
+
+    - name: max_zlib_mem_usage
+      type: integer
+      description: >
+        Configured maximum zlib memory usage
+
+    - name: tasks
+      type: integer
+      description: >
+        Number of tasks
+
+    - name: run_queue
+      type: integer
+      description: >
+        Number of tasks in the run queue
+
+    - name: idle_pct
+      type: integer
+      description: >
+        Percentage of time the process was idle

From 543319e1800db102cff5ab63eb6d7b948cf99593 Mon Sep 17 00:00:00 2001
From: Al Lefebvre
Date: Sun, 21 Aug 2016 19:32:19 -0400
Subject: [PATCH 07/19] Updated necessary test related files

---
 metricbeat/docker-compose.yml              | 2 ++
 metricbeat/docker-entrypoint.sh            | 2 +-
 metricbeat/module/haproxy/_meta/Dockerfile | 2 ++
 3 files changed, 5 insertions(+), 1 deletion(-)
 create mode 100644 metricbeat/module/haproxy/_meta/Dockerfile

diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml
index 0fd3d560f47..03fa210db5d 100644
--- a/metricbeat/docker-compose.yml
+++ b/metricbeat/docker-compose.yml
@@ -6,6 +6,7 @@ beat:
     - mysql
     - nginx
     - redis
+    - haproxy
     - zookeeper
   environment:
     - APACHE_HOST=apache
@@ -21,6 +22,7 @@ beat:
     - MYSQL_PORT=3306
     - ZOOKEEPER_HOST=zookeeper
     - ZOOKEEPER_PORT=2181
+    - HAPROXY_ADDR=/var/run/haproxy.sock
     - TEST_ENVIRONMENT=false
   working_dir: /go/src/github.com/elastic/beats/metricbeat
   volumes:
diff --git a/metricbeat/docker-entrypoint.sh b/metricbeat/docker-entrypoint.sh
index 0dd168c7143..44a643c73f9 100755
--- a/metricbeat/docker-entrypoint.sh
+++ b/metricbeat/docker-entrypoint.sh
@@ -48,5 +48,5 @@ waitFor ${MYSQL_HOST} ${MYSQL_PORT} MySQL
 waitFor ${NGINX_HOST} ${NGINX_PORT} Nginx
 waitFor ${REDIS_HOST} ${REDIS_PORT} Redis
 waitFor ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} Zookeeper
-waitFor ${HAPROXY_STATS_SOCKET} HAProxy
+waitFor ${HAPROXY_ADDR} HAProxy
 exec "$@"
diff --git a/metricbeat/module/haproxy/_meta/Dockerfile b/metricbeat/module/haproxy/_meta/Dockerfile
new file mode 100644
index 00000000000..6fca1419626
--- /dev/null
+++ b/metricbeat/module/haproxy/_meta/Dockerfile
@@ -0,0 +1,2 @@
+FROM haproxy:1.6
+COPY haproxy.conf /usr/local/etc/haproxy/haproxy.cfg
\ No newline at end of file

From 52652bfba1c3d2b2a1b900fde6007bf47506a2f3 Mon Sep 17 00:00:00 2001
From: Al Lefebvre
Date: Tue, 23 Aug 2016 20:25:01 -0400
Subject: [PATCH 08/19] Committing current changes

---
 metricbeat/docker-compose.yml                |  5 ++-
 metricbeat/docker-entrypoint.sh              | 37 ++++----------------
 metricbeat/metricbeat.yml                    |  3 +-
 .../module/haproxy/_meta/config.full.yml     |  2 +-
 metricbeat/module/haproxy/_meta/config.yml   |  9 +++--
 metricbeat/module/haproxy/_meta/haproxy.conf |  2 +-
 6 files changed, 20 insertions(+), 38 deletions(-)

diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml
index 03fa210db5d..580d3e78ba0 100644
--- a/metricbeat/docker-compose.yml
+++ b/metricbeat/docker-compose.yml
@@ -6,8 +6,8 @@ beat:
     - mysql
     - nginx
     - redis
-    - haproxy
     - zookeeper
+    - haproxy
   environment:
     - APACHE_HOST=apache
     - APACHE_PORT=80
@@ -22,7 +22,8 @@ beat:
     - MYSQL_PORT=3306
     - ZOOKEEPER_HOST=zookeeper
     - ZOOKEEPER_PORT=2181
-    - HAPROXY_ADDR=/var/run/haproxy.sock
+    - HAPROXY_HOST=haproxy
+    - HAPROXY_PORT=14567
     - TEST_ENVIRONMENT=false
   working_dir: /go/src/github.com/elastic/beats/metricbeat
   volumes:
diff --git a/metricbeat/docker-entrypoint.sh b/metricbeat/docker-entrypoint.sh
index 44a643c73f9..10282bbd8db 100755
--- a/metricbeat/docker-entrypoint.sh
+++ b/metricbeat/docker-entrypoint.sh
@@ -3,31 +3,12 @@ set -e

 # Wait for. Params: host, port, service
 waitFor() {
-
-    if [ $# == "3" ]; then
-        SERVICE=$3
-        ADDRESS="${1}:${2}"
-    else
-        SERVICE=$2
-        ADDRESS="${1}"
-    fi
-
-    echo -n "Waiting for ${SERVICE}(${ADDRESS}) to start."
-
+    echo -n "Waiting for ${3}(${1}:${2}) to start."
     for ((i=1; i<=90; i++)) do
-
-        if [ $# == "3" ]; then
-            if nc -vz ${1} ${2} 2>/dev/null; then
-                echo
-                echo "${SERVICE} is ready!"
-                return 0
-            fi
-        else
-            if nc -Uvz ${1} 2>/dev/null; then
-                echo
-                echo "${SERVICE} is ready!"
-                return 0
-            fi
+        if nc -vz ${1} ${2} 2>/dev/null; then
+            echo
+            echo "${3} is ready!"
+            return 0
         fi

         ((i++))
@@ -36,10 +17,8 @@ waitFor() {
     done

     echo
-    echo >&2 "${SERVICE} is not available"
-    echo >&2 "Address: ${ADDRESS} "
-
-    nc -U /tmp/haproxy-stats.sock
+    echo >&2 "${3} is not available"
+    echo >&2 "Address: ${1}:${2}"
 }

 # Main
@@ -48,5 +27,5 @@ waitFor ${MYSQL_HOST} ${MYSQL_PORT} MySQL
 waitFor ${NGINX_HOST} ${NGINX_PORT} Nginx
 waitFor ${REDIS_HOST} ${REDIS_PORT} Redis
 waitFor ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} Zookeeper
-waitFor ${HAPROXY_ADDR} HAProxy
+waitFor ${HAPROXY_HOST} ${HAPROXY_PORT} HAProxy
 exec "$@"
diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml
index ac215c28bed..9b80600a365 100644
--- a/metricbeat/metricbeat.yml
+++ b/metricbeat/metricbeat.yml
@@ -50,7 +50,8 @@ metricbeat.modules:
     - "info"
   enabled: true
   period: 10s
-  stats_addr: "unix:///tmp/haproxy-stats.sock"
+  #stats_addr: "unix:///tmp/haproxy-stats.sock"
+  stats_addr: "tcp://127.0.0.1:14567"



diff --git a/metricbeat/module/haproxy/_meta/config.full.yml b/metricbeat/module/haproxy/_meta/config.full.yml
index 190ff50f833..e01d0d646c6 100644
--- a/metricbeat/module/haproxy/_meta/config.full.yml
+++ b/metricbeat/module/haproxy/_meta/config.full.yml
@@ -8,4 +8,4 @@

   #period: 10s

-  #stats_addr: "unix:///tmp/haproxy-stats.sock"
+  #stats_addr: "tcp://127.0.0.1:14567"
diff --git a/metricbeat/module/haproxy/_meta/config.yml b/metricbeat/module/haproxy/_meta/config.yml
index e5958517958..f2589e7ae19 100644
--- a/metricbeat/module/haproxy/_meta/config.yml
+++ b/metricbeat/module/haproxy/_meta/config.yml
@@ -1,6 +1,7 @@
 - module: haproxy
-  metricsets: ["stat"]
+  metricsets:
+    - "stat"
+    - "info"
   enabled: true
-  period: 1s
-  hosts: ["localhost"]
-
+  period: 10s
+  stats_addr: "tcp://127.0.0.1:14567"
diff --git a/metricbeat/module/haproxy/_meta/haproxy.conf b/metricbeat/module/haproxy/_meta/haproxy.conf
index e4c984ff3af..ba79afc372f 100644
---
a/metricbeat/module/haproxy/_meta/haproxy.conf +++ b/metricbeat/module/haproxy/_meta/haproxy.conf @@ -10,7 +10,7 @@ global pidfile /var/run/haproxy.pid # Logging to syslog facility local0 log 127.0.0.1 local0 - stats socket /var/run/haproxy.sock mode 700 + stats socket 0.0.0.0:14567 spread-checks 5 #debug From eb7c22dc2b5dce816ed7ba900ec208d147f72659 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Wed, 24 Aug 2016 15:42:19 -0400 Subject: [PATCH 09/19] Committing current changes --- vendor/github.com/gocarina/gocsv/LICENSE | 21 + vendor/github.com/gocarina/gocsv/README.md | 151 +++ vendor/github.com/gocarina/gocsv/csv.go | 199 +++ vendor/github.com/gocarina/gocsv/decode.go | 274 ++++ .../github.com/gocarina/gocsv/decode_test.go | 415 ++++++ vendor/github.com/gocarina/gocsv/encode.go | 135 ++ .../github.com/gocarina/gocsv/encode_test.go | 214 ++++ vendor/github.com/gocarina/gocsv/reflect.go | 104 ++ .../gocarina/gocsv/sample_structs_test.go | 42 + vendor/github.com/gocarina/gocsv/types.go | 430 +++++++ .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 154 +++ .../mapstructure/decode_hooks_test.go | 229 ++++ .../mitchellh/mapstructure/error.go | 50 + .../mitchellh/mapstructure/mapstructure.go | 790 ++++++++++++ .../mapstructure_benchmark_test.go | 279 +++++ .../mapstructure/mapstructure_bugs_test.go | 47 + .../mapstructure_examples_test.go | 203 +++ .../mapstructure/mapstructure_test.go | 1107 +++++++++++++++++ vendor/vendor.json | 19 + 21 files changed, 4930 insertions(+) create mode 100644 vendor/github.com/gocarina/gocsv/LICENSE create mode 100644 vendor/github.com/gocarina/gocsv/README.md create mode 100644 vendor/github.com/gocarina/gocsv/csv.go create mode 100644 vendor/github.com/gocarina/gocsv/decode.go create mode 100644 vendor/github.com/gocarina/gocsv/decode_test.go create mode 100644 vendor/github.com/gocarina/gocsv/encode.go create mode 100644 vendor/github.com/gocarina/gocsv/encode_test.go create mode 100644 vendor/github.com/gocarina/gocsv/reflect.go create mode 100644 vendor/github.com/gocarina/gocsv/sample_structs_test.go create mode 100644 vendor/github.com/gocarina/gocsv/types.go create mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go create mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure_test.go create mode 100644 vendor/vendor.json diff --git a/vendor/github.com/gocarina/gocsv/LICENSE b/vendor/github.com/gocarina/gocsv/LICENSE new file mode 100644 index 00000000000..052a371193e --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jonathan Picques + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including 
without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gocarina/gocsv/README.md b/vendor/github.com/gocarina/gocsv/README.md new file mode 100644 index 00000000000..69aabca636c --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/README.md @@ -0,0 +1,151 @@ +Go CSV +===== + +The GoCSV package aims to provide easy serialization and deserialization functions to use CSV in Golang + +API and techniques inspired from https://godoc.org/gopkg.in/mgo.v2 + +[![GoDoc](https://godoc.org/github.com/gocarina/gocsv?status.png)](https://godoc.org/github.com/gocarina/gocsv) +[![Build Status](https://travis-ci.org/gocarina/gocsv.svg?branch=master)](https://travis-ci.org/gocarina/gocsv) + +Installation +===== + +```go get -u github.com/gocarina/gocsv``` + +Full example +===== + +Consider the following CSV file + +```csv + +client_id,client_name,client_age +1,Jose,42 +2,Daniel,26 +3,Vincent,32 + +``` + +Easy binding in Go! +--- + +```go + +package main + +import ( + "fmt" + "gocsv" + "os" +) + +type Client struct { // Our example struct, you can use "-" to ignore a field + Id string `csv:"id"` + Name string `csv:"name"` + Age string `csv:"age"` + NotUsed string `csv:"-"` +} + +func main() { + clientsFile, err := os.OpenFile("clients.csv", os.O_RDWR|os.O_CREATE, os.ModePerm) + if err != nil { + panic(err) + } + defer clientsFile.Close() + + clients := []*Client{} + + if err := gocsv.UnmarshalFile(clientsFile, &clients); err != nil { // Load clients from file + panic(err) + } + for _, client := range clients { + fmt.Println("Hello", client.Name) + } + + if _, err := clientsFile.Seek(0, 0); err != nil { // Go to the start of the file + panic(err) + } + + clients = append(clients, &Client{Id: "12", Name: "John", Age: "21"}) // Add clients + clients = append(clients, &Client{Id: "13", Name: "Fred"}) + clients = append(clients, &Client{Id: "14", Name: "James", Age: "32"}) + clients = append(clients, &Client{Id: "15", Name: "Danny"}) + csvContent, err := gocsv.MarshalString(&clients) // Get all clients as CSV string + //err = gocsv.MarshalFile(&clients, clientsFile) // Use this to save the CSV back to the file + if err != nil { + panic(err) + } + fmt.Println(csvContent) // Display all clients as CSV string + +} + +``` + +Customizable Converters +--- + +```go + +type DateTime struct { + time.Time +} + +// Convert the internal date as CSV string +func (date *DateTime) MarshalCSV() (string, error) { + return date.Time.Format("20060201"), nil +} + +// You could also use the standard Stringer interface +func (date *DateTime) String() (string) { + return date.String() // Redundant, just for example +} + +// Convert the CSV string as internal date +func (date *DateTime) UnmarshalCSV(csv string) (err 
error) { + date.Time, err = time.Parse("20060201", csv) + if err != nil { + return err + } + return nil +} + +type Client struct { // Our example struct with a custom type (DateTime) + Id string `csv:"id"` + Name string `csv:"name"` + Employed DateTime `csv:"employed"` +} + +``` + +Customizable CSV Reader / Writer +--- + +```go + +func main() { + ... + + gocsv.SetCSVReader(func(in io.Reader) *csv.Reader { + //return csv.NewReader(in) + return gocsv.LazyCSVReader(in) // Allows use of quotes in CSV + }) + + ... + + gocsv.UnmarshalFile(file, &clients) + + ... + + gocsv.SetCSVWriter(func(out io.Writer) *csv.Writer { + return csv.NewWriter(out) + }) + + ... + + gocsv.MarshalFile(&clients, file) + + ... +} + +``` diff --git a/vendor/github.com/gocarina/gocsv/csv.go b/vendor/github.com/gocarina/gocsv/csv.go new file mode 100644 index 00000000000..7725ab1cb86 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/csv.go @@ -0,0 +1,199 @@ +// Copyright 2014 Jonathan Picques. All rights reserved. +// Use of this source code is governed by a MIT license +// The license can be found in the LICENSE file. + +// The GoCSV package aims to provide easy CSV serialization and deserialization to the golang programming language + +package gocsv + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "os" + "reflect" + "strings" +) + +// FailIfUnmatchedStructTags indicates whether it is considered an error when there is an unmatched +// struct tag. +var FailIfUnmatchedStructTags = false + +// FailIfDoubleHeaderNames indicates whether it is considered an error when a header name is repeated +// in the csv header. +var FailIfDoubleHeaderNames = false + +// TagSeparator defines seperator string for multiple csv tags in struct fields +var TagSeparator = "," + +// -------------------------------------------------------------------------- +// CSVWriter used to format CSV + +var selfCSVWriter = DefaultCSVWriter + +// DefaultCSVWriter is the default CSV writer used to format CSV (cf. csv.NewWriter) +func DefaultCSVWriter(out io.Writer) *csv.Writer { + return csv.NewWriter(out) +} + +// SetCSVWriter sets the CSV writer used to format CSV. +func SetCSVWriter(csvWriter func(io.Writer) *csv.Writer) { + selfCSVWriter = csvWriter +} + +func getCSVWriter(out io.Writer) *csv.Writer { + return selfCSVWriter(out) +} + +// -------------------------------------------------------------------------- +// CSVReader used to parse CSV + +var selfCSVReader = DefaultCSVReader + +// DefaultCSVReader is the default CSV reader used to parse CSV (cf. csv.NewReader) +func DefaultCSVReader(in io.Reader) *csv.Reader { + return csv.NewReader(in) +} + +// LazyCSVReader returns a lazy CSV reader, with LazyQuotes and TrimLeadingSpace. +func LazyCSVReader(in io.Reader) *csv.Reader { + csvReader := csv.NewReader(in) + csvReader.LazyQuotes = true + csvReader.TrimLeadingSpace = true + return csvReader +} + +// SetCSVReader sets the CSV reader used to parse CSV. +func SetCSVReader(csvReader func(io.Reader) *csv.Reader) { + selfCSVReader = csvReader +} + +func getCSVReader(in io.Reader) *csv.Reader { + return selfCSVReader(in) +} + +// -------------------------------------------------------------------------- +// Marshal functions + +// MarshalFile saves the interface as CSV in the file. +func MarshalFile(in interface{}, file *os.File) (err error) { + return Marshal(in, file) +} + +// MarshalString returns the CSV string from the interface. 
+func MarshalString(in interface{}) (out string, err error) { + bufferString := bytes.NewBufferString(out) + if err := Marshal(in, bufferString); err != nil { + return "", err + } + return bufferString.String(), nil +} + +// MarshalBytes returns the CSV bytes from the interface. +func MarshalBytes(in interface{}) (out []byte, err error) { + bufferString := bytes.NewBuffer(out) + if err := Marshal(in, bufferString); err != nil { + return nil, err + } + return bufferString.Bytes(), nil +} + +// Marshal returns the CSV in writer from the interface. +func Marshal(in interface{}, out io.Writer) (err error) { + writer := getCSVWriter(out) + return writeTo(writer, in) +} + +// MarshalChan returns the CSV read from the channel. +func MarshalChan(c <-chan interface{}, out *csv.Writer) error { + return writeFromChan(out, c) +} + +// MarshalCSV returns the CSV in writer from the interface. +func MarshalCSV(in interface{}, out *csv.Writer) (err error) { + return writeTo(out, in) +} + +// -------------------------------------------------------------------------- +// Unmarshal functions + +// UnmarshalFile parses the CSV from the file in the interface. +func UnmarshalFile(in *os.File, out interface{}) (err error) { + return Unmarshal(in, out) +} + +// UnmarshalString parses the CSV from the string in the interface. +func UnmarshalString(in string, out interface{}) (err error) { + return Unmarshal(strings.NewReader(in), out) +} + +// UnmarshalBytes parses the CSV from the bytes in the interface. +func UnmarshalBytes(in []byte, out interface{}) (err error) { + return Unmarshal(bytes.NewReader(in), out) +} + +// Unmarshal parses the CSV from the reader in the interface. +func Unmarshal(in io.Reader, out interface{}) (err error) { + return readTo(newDecoder(in), out) +} + +// UnmarshalCSV parses the CSV from the reader in the interface. +func UnmarshalCSV(in *csv.Reader, out interface{}) error { + return readTo(csvDecoder{in}, out) +} + +// UnmarshalToChan parses the CSV from the reader and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalToChan(in io.Reader, c interface{}) (err error) { + return readEach(newDecoder(in), c) +} + +// UnmarshalStringToChan parses the CSV from the string and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalStringToChan(in string, c interface{}) (err error) { + return UnmarshalToChan(strings.NewReader(in), c) +} + +// UnmarshalBytesToChan parses the CSV from the bytes and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalBytesToChan(in []byte, c interface{}) (err error) { + return UnmarshalToChan(bytes.NewReader(in), c) +} + +// UnmarshalToCallback parses the CSV from the reader and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalToCallback(in io.Reader, f interface{}) (err error) { + valueFunc := reflect.ValueOf(f) + t := reflect.TypeOf(f) + if t.NumIn() != 1 { + return fmt.Errorf("the given function must have exactly one parameter") + } + c := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0) + go func() { + err = UnmarshalToChan(in, c.Interface()) + }() + for { + if err != nil { + return err + } + v, notClosed := c.Recv() + if !notClosed || v.Interface() == nil { + break + } + valueFunc.Call([]reflect.Value{v}) + } + return +} + +// UnmarshalBytesToCallback parses the CSV from the bytes and send each value to the given func f. +// The func must look like func(Struct). 
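+//
+// A short sketch of the expected callback shape (the row type here is
+// hypothetical, not part of this file):
+//
+//	type row struct {
+//		Name string `csv:"name"`
+//	}
+//	err := UnmarshalBytesToCallback([]byte("name\na\nb"), func(r row) {
+//		fmt.Println(r.Name) // invoked once per parsed CSV row
+//	})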
+func UnmarshalBytesToCallback(in []byte, f interface{}) (err error) { + return UnmarshalToCallback(bytes.NewReader(in), f) +} + +// UnmarshalStringToCallback parses the CSV from the string and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalStringToCallback(in string, c interface{}) (err error) { + return UnmarshalToCallback(strings.NewReader(in), c) +} diff --git a/vendor/github.com/gocarina/gocsv/decode.go b/vendor/github.com/gocarina/gocsv/decode.go new file mode 100644 index 00000000000..c1abcb28133 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/decode.go @@ -0,0 +1,274 @@ +package gocsv + +import ( + "encoding/csv" + "errors" + "fmt" + "io" + "reflect" +) + +// Decoder . +type Decoder interface { + getCSVRows() ([][]string, error) +} + +// SimpleDecoder . +type SimpleDecoder interface { + getCSVRow() ([]string, error) +} + +type decoder struct { + in io.Reader + csvDecoder *csvDecoder +} + +func newDecoder(in io.Reader) *decoder { + return &decoder{in: in} +} + +func (decode *decoder) getCSVRows() ([][]string, error) { + return getCSVReader(decode.in).ReadAll() +} + +func (decode *decoder) getCSVRow() ([]string, error) { + if decode.csvDecoder == nil { + decode.csvDecoder = &csvDecoder{getCSVReader(decode.in)} + } + return decode.csvDecoder.Read() +} + +type csvDecoder struct { + *csv.Reader +} + +func (c csvDecoder) getCSVRows() ([][]string, error) { + return c.ReadAll() +} + +func (c csvDecoder) getCSVRow() ([]string, error) { + return c.Read() +} + +func maybeMissingStructFields(structInfo []fieldInfo, headers []string) error { + if len(structInfo) == 0 { + return nil + } + + headerMap := make(map[string]struct{}, len(headers)) + for idx := range headers { + headerMap[headers[idx]] = struct{}{} + } + + for _, info := range structInfo { + found := false + for _, key := range info.keys { + if _, ok := headerMap[key]; ok { + found = true + break + } + } + if !found { + return fmt.Errorf("found unmatched struct field with tags %v", info.keys) + } + } + return nil +} + +// Check that no header name is repeated twice +func maybeDoubleHeaderNames(headers []string) error { + headerMap := make(map[string]bool, len(headers)) + for _, v := range headers { + if _, ok := headerMap[v]; ok { + return fmt.Errorf("Repeated header name: %v", v) + } + headerMap[v] = true + } + return nil +} + +func readTo(decoder Decoder, out interface{}) error { + outValue, outType := getConcreteReflectValueAndType(out) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + outInnerWasPointer, outInnerType := getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + return err + } + csvRows, err := decoder.getCSVRows() // Get the CSV csvRows + if err != nil { + return err + } + if len(csvRows) == 0 { + return errors.New("empty csv file given") + } + if err := ensureOutCapacity(&outValue, len(csvRows)); err != nil { // Ensure the container is big enough to hold the CSV content + return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct tags found") + } + + headers := csvRows[0] + body := csvRows[1:] + + csvHeadersLabels := make(map[int]*fieldInfo, len(outInnerStructInfo.Fields)) // Used to store the correspondance header <-> position in CSV + + for i, 
csvColumnHeader := range headers { + if fieldInfo := getCSVFieldPosition(csvColumnHeader, outInnerStructInfo); fieldInfo != nil { + csvHeadersLabels[i] = fieldInfo + } + } + + if FailIfUnmatchedStructTags { + if err := maybeMissingStructFields(outInnerStructInfo.Fields, headers); err != nil { + return err + } + } + if FailIfDoubleHeaderNames { + if err := maybeDoubleHeaderNames(headers); err != nil { + return err + } + } + + for i, csvRow := range body { + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range csvRow { + if fieldInfo, ok := csvHeadersLabels[j]; ok { // Position found accordingly to header name + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i + 2, //add 2 to account for the header & 0-indexing of arrays + Column: j + 1, + Err: err, + } + } + } + } + outValue.Index(i).Set(outInner) + } + return nil +} + +func readEach(decoder SimpleDecoder, c interface{}) error { + headers, err := decoder.getCSVRow() + if err != nil { + return err + } + outValue, outType := getConcreteReflectValueAndType(c) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + defer outValue.Close() + outInnerWasPointer, outInnerType := getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct tags found") + } + csvHeadersLabels := make(map[int]*fieldInfo, len(outInnerStructInfo.Fields)) // Used to store the correspondance header <-> position in CSV + for i, csvColumnHeader := range headers { + if fieldInfo := getCSVFieldPosition(csvColumnHeader, outInnerStructInfo); fieldInfo != nil { + csvHeadersLabels[i] = fieldInfo + } + } + if err := maybeMissingStructFields(outInnerStructInfo.Fields, headers); err != nil { + if FailIfUnmatchedStructTags { + return err + } + } + if FailIfDoubleHeaderNames { + if err := maybeDoubleHeaderNames(headers); err != nil { + return err + } + } + i := 0 + for { + line, err := decoder.getCSVRow() + if err == io.EOF { + break + } else if err != nil { + return err + } + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range line { + if fieldInfo, ok := csvHeadersLabels[j]; ok { // Position found accordingly to header name + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i + 2, //add 2 to account for the header & 0-indexing of arrays + Column: j + 1, + Err: err, + } + } + } + } + outValue.Send(outInner) + i++ + } + return nil +} + +// Check if the outType is an array or a slice +func ensureOutType(outType reflect.Type) error { + switch outType.Kind() { + case reflect.Slice: + fallthrough + case reflect.Chan: + fallthrough + case reflect.Array: + return nil + } + return fmt.Errorf("cannot use " + outType.String() + ", only slice or array supported") +} + +// Check if the outInnerType is of type struct +func ensureOutInnerType(outInnerType reflect.Type) error { + switch outInnerType.Kind() { + case reflect.Struct: + return nil + } + return fmt.Errorf("cannot use " + outInnerType.String() + ", only 
struct supported") +} + +func ensureOutCapacity(out *reflect.Value, csvLen int) error { + switch out.Kind() { + case reflect.Array: + if out.Len() < csvLen-1 { // Array is not big enough to hold the CSV content (arrays are not addressable) + return fmt.Errorf("array capacity problem: cannot store %d %s in %s", csvLen-1, out.Type().Elem().String(), out.Type().String()) + } + case reflect.Slice: + if !out.CanAddr() && out.Len() < csvLen-1 { // Slice is not big enough tho hold the CSV content and is not addressable + return fmt.Errorf("slice capacity problem and is not addressable (did you forget &?)") + } else if out.CanAddr() && out.Len() < csvLen-1 { + out.Set(reflect.MakeSlice(out.Type(), csvLen-1, csvLen-1)) // Slice is not big enough, so grows it + } + } + return nil +} + +func getCSVFieldPosition(key string, structInfo *structInfo) *fieldInfo { + for _, field := range structInfo.Fields { + if field.matchesKey(key) { + return &field + } + } + return nil +} + +func createNewOutInner(outInnerWasPointer bool, outInnerType reflect.Type) reflect.Value { + if outInnerWasPointer { + return reflect.New(outInnerType) + } + return reflect.New(outInnerType).Elem() +} + +func setInnerField(outInner *reflect.Value, outInnerWasPointer bool, index []int, value string) error { + oi := *outInner + if outInnerWasPointer { + oi = outInner.Elem() + } + return setField(oi.FieldByIndex(index), value) +} diff --git a/vendor/github.com/gocarina/gocsv/decode_test.go b/vendor/github.com/gocarina/gocsv/decode_test.go new file mode 100644 index 00000000000..baafd8792a5 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/decode_test.go @@ -0,0 +1,415 @@ +package gocsv + +import ( + "bytes" + "encoding/csv" + "io" + "strconv" + "strings" + "testing" +) + +func Test_readTo(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +f,1,baz +e,3,b`) + d := &decoder{in: b} + + var samples []Sample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := Sample{Foo: "f", Bar: 1, Baz: "baz"} + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = Sample{Foo: "e", Bar: 3, Baz: "b"} + if expected != samples[1] { + t.Fatalf("expected second sample %v, got %v", expected, samples[1]) + } + + b = bytes.NewBufferString(`foo,BAR,Baz +f,1,baz +e,BAD_INPUT,b`) + d = &decoder{in: b} + samples = []Sample{} + err := readTo(d, &samples) + if err == nil { + t.Fatalf("Expected error from bad input, got: %+v", samples) + } + switch actualErr := err.(type) { + case *csv.ParseError: + if actualErr.Line != 3 { + t.Fatalf("Expected csv.ParseError on line 3, got: %d", actualErr.Line) + } + if actualErr.Column != 2 { + t.Fatalf("Expected csv.ParseError in column 2, got: %d", actualErr.Column) + } + default: + t.Fatalf("incorrect error type: %T", err) + } + +} + +func Test_readTo_complex_embed(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + d := &decoder{in: b} + + var samples []SkipFieldSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, 
samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +func Test_readEach(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + d := &decoder{in: b} + + c := make(chan SkipFieldSample) + var samples []SkipFieldSample + go func() { + if err := readEach(d, c); err != nil { + t.Fatal(err) + } + }() + for v := range c { + samples = append(samples, v) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +func Test_maybeMissingStructFields(t *testing.T) { + structTags := []fieldInfo{ + {keys: []string{"foo"}}, + {keys: []string{"bar"}}, + {keys: []string{"baz"}}, + } + badHeaders := []string{"hi", "mom", "bacon"} + goodHeaders := []string{"foo", "bar", "baz"} + + // no tags to match, expect no error + if err := maybeMissingStructFields([]fieldInfo{}, goodHeaders); err != nil { + t.Fatal(err) + } + + // bad headers, expect an error + if err := maybeMissingStructFields(structTags, badHeaders); err == nil { + t.Fatal("expected an error, but no error found") + } + + // good headers, expect no error + if err := maybeMissingStructFields(structTags, goodHeaders); err != nil { + t.Fatal(err) + } + + // extra headers, but all structtags match; expect no error + moarHeaders := append(goodHeaders, "qux", "quux", "corge", "grault") + if err := maybeMissingStructFields(structTags, moarHeaders); err != nil { + t.Fatal(err) + } + + // not all structTags match, but there's plenty o' headers; expect + // error + mismatchedHeaders := []string{"foo", "qux", "quux", "corgi"} + if err := maybeMissingStructFields(structTags, mismatchedHeaders); err == nil { + t.Fatal("expected an error, but no error found") + } +} + +func Test_maybeDoubleHeaderNames(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d := &decoder{in: b} + var samples []Sample + + // *** check maybeDoubleHeaderNames + if err := maybeDoubleHeaderNames([]string{"foo", "BAR", "foo"}); err == nil { + t.Fatal("maybeDoubleHeaderNames did not raise an error when a should have.") + } + + // *** check readTo + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + // Double header allowed, value should be of third row + if samples[0].Foo != "baz" { + t.Fatal("Double header allowed, value should be of third row but is not. Function called is readTo.") + } + // Double header not allowed, should fail + FailIfDoubleHeaderNames = true + if err := readTo(d, &samples); err == nil { + t.Fatal("Double header not allowed but no error raised. 
Function called is readTo.") + } + + // *** check readEach + FailIfDoubleHeaderNames = false + b = bytes.NewBufferString(`foo,BAR,foo + f,1,baz + e,3,b`) + d = &decoder{in: b} + samples = samples[:0] + c := make(chan Sample) + go func() { + if err := readEach(d, c); err != nil { + t.Fatal(err) + } + }() + for v := range c { + samples = append(samples, v) + } + // Double header allowed, value should be of third row + if samples[0].Foo != "baz" { + t.Fatal("Double header allowed, value should be of third row but is not. Function called is readEach.") + } + // Double header not allowed, should fail + FailIfDoubleHeaderNames = true + b = bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d = &decoder{in: b} + c = make(chan Sample) + go func() { + if err := readEach(d, c); err == nil { + t.Fatal("Double header not allowed but no error raised. Function called is readEach.") + } + }() + for v := range c { + samples = append(samples, v) + } +} + +func TestUnmarshalToCallback(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + var samples []SkipFieldSample + if err := UnmarshalBytesToCallback(b.Bytes(), func(s SkipFieldSample) { + samples = append(samples, s) + }); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +// TestRenamedTypes tests for unmarshaling functions on redefined basic types. +func TestRenamedTypesUnmarshal(t *testing.T) { + b := bytes.NewBufferString(`foo;bar +1,4;1.5 +2,3;2.4`) + d := &decoder{in: b} + var samples []RenamedSample + + // Set different csv field separator to enable comma in floats + SetCSVReader(func(in io.Reader) *csv.Reader { + csvin := csv.NewReader(in) + csvin.Comma = ';' + return csvin + }) + // Switch back to default for tests executed after this + defer SetCSVReader(DefaultCSVReader) + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].RenamedFloatUnmarshaler != 1.4 { + t.Fatalf("Parsed float value wrong for renamed float64 type. Expected 1.4, got %v.", samples[0].RenamedFloatUnmarshaler) + } + if samples[0].RenamedFloatDefault != 1.5 { + t.Fatalf("Parsed float value wrong for renamed float64 type without an explicit unmarshaler function. Expected 1.5, got %v.", samples[0].RenamedFloatDefault) + } + + // Test that errors raised by UnmarshalCSV are correctly reported + b = bytes.NewBufferString(`foo;bar +4.2;2.4`) + d = &decoder{in: b} + samples = samples[:0] + if perr, _ := readTo(d, &samples).(*csv.ParseError); perr == nil { + t.Fatalf("Expected ParseError, got nil.") + } else if _, ok := perr.Err.(UnmarshalError); !ok { + t.Fatalf("Expected UnmarshalError, got %v", perr.Err) + } +} + +func (rf *RenamedFloat64Unmarshaler) UnmarshalCSV(csv string) (err error) { + // Purely for testing purposes: Raise error on specific string + if csv == "4.2" { + return UnmarshalError{"Test error: Invalid float 4.2"} + } + + // Convert , to . 
before parsing to create valid float strings + converted := strings.Replace(csv, ",", ".", -1) + var f float64 + if f, err = strconv.ParseFloat(converted, 64); err != nil { + return err + } + *rf = RenamedFloat64Unmarshaler(f) + return nil +} + +type UnmarshalError struct { + msg string +} + +func (e UnmarshalError) Error() string { + return e.msg +} + +func TestMultipleStructTags(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + d := &decoder{in: b} + + var samples []MultiTagSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "b" { + t.Fatalf("expected second tag value 'b' in multi tag struct field, got %v", samples[0].Foo) + } + + b = bytes.NewBufferString(`foo,BAR +e,3`) + d = &decoder{in: b} + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "e" { + t.Fatalf("wrong value in multi tag struct field, expected 'e', got %v", samples[0].Foo) + } + + b = bytes.NewBufferString(`BAR,Baz +3,b`) + d = &decoder{in: b} + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "b" { + t.Fatal("wrong value in multi tag struct field") + } +} + +func TestStructTagSeparator(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + d := &decoder{in: b} + + defaultTagSeparator := TagSeparator + TagSeparator = "|" + defer func() { TagSeparator = defaultTagSeparator }() + + var samples []TagSeparatorSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + + if samples[0].Foo != "b" { + t.Fatal("expected second tag value in multi tag struct field.") + } +} diff --git a/vendor/github.com/gocarina/gocsv/encode.go b/vendor/github.com/gocarina/gocsv/encode.go new file mode 100644 index 00000000000..52dd0f722aa --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/encode.go @@ -0,0 +1,135 @@ +package gocsv + +import ( + "encoding/csv" + "fmt" + "io" + "reflect" +) + +type encoder struct { + out io.Writer +} + +func newEncoder(out io.Writer) *encoder { + return &encoder{out} +} + +func writeFromChan(writer *csv.Writer, c <-chan interface{}) error { + // Get the first value. It wil determine the header structure. 
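+	// The channel element type is interface{}, so the header row cannot
+	// be derived statically; it has to come from reflecting on the first
+	// value received.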
+	firstValue := <-c
+	inValue, inType := getConcreteReflectValueAndType(firstValue) // Get the concrete type
+	if err := ensureStructOrPtr(inType); err != nil {
+		return err
+	}
+	inInnerWasPointer := inType.Kind() == reflect.Ptr
+	inInnerStructInfo := getStructInfo(inType) // Get the inner struct info to get CSV annotations
+	csvHeadersLabels := make([]string, len(inInnerStructInfo.Fields))
+	for i, fieldInfo := range inInnerStructInfo.Fields { // Used to write the header (first line) in CSV
+		csvHeadersLabels[i] = fieldInfo.getFirstKey()
+	}
+	if err := writer.Write(csvHeadersLabels); err != nil {
+		return err
+	}
+	write := func(val reflect.Value) error {
+		for j, fieldInfo := range inInnerStructInfo.Fields {
+			csvHeadersLabels[j] = ""
+			inInnerFieldValue, err := getInnerField(val, inInnerWasPointer, fieldInfo.IndexChain) // Get the correct field header <-> position
+			if err != nil {
+				return err
+			}
+			csvHeadersLabels[j] = inInnerFieldValue
+		}
+		if err := writer.Write(csvHeadersLabels); err != nil {
+			return err
+		}
+		return nil
+	}
+	if err := write(inValue); err != nil {
+		return err
+	}
+	for v := range c {
+		val, valType := getConcreteReflectValueAndType(v) // Get the concrete type of each subsequent element
+		if err := ensureStructOrPtr(valType); err != nil {
+			return err
+		}
+		if err := write(val); err != nil {
+			return err
+		}
+	}
+	writer.Flush()
+	return writer.Error()
+}
+
+func writeTo(writer *csv.Writer, in interface{}) error {
+	inValue, inType := getConcreteReflectValueAndType(in) // Get the concrete type (not pointer) (Slice or Array)
+	if err := ensureInType(inType); err != nil {
+		return err
+	}
+	inInnerWasPointer, inInnerType := getConcreteContainerInnerType(inType) // Get the concrete inner type (not pointer) (Container<"?">)
+	if err := ensureInInnerType(inInnerType); err != nil {
+		return err
+	}
+	inInnerStructInfo := getStructInfo(inInnerType) // Get the inner struct info to get CSV annotations
+	csvHeadersLabels := make([]string, len(inInnerStructInfo.Fields))
+	for i, fieldInfo := range inInnerStructInfo.Fields { // Used to write the header (first line) in CSV
+		csvHeadersLabels[i] = fieldInfo.getFirstKey()
+	}
+	if err := writer.Write(csvHeadersLabels); err != nil {
+		return err
+	}
+	inLen := inValue.Len()
+	for i := 0; i < inLen; i++ { // Iterate over container rows
+		for j, fieldInfo := range inInnerStructInfo.Fields {
+			csvHeadersLabels[j] = ""
+			inInnerFieldValue, err := getInnerField(inValue.Index(i), inInnerWasPointer, fieldInfo.IndexChain) // Get the correct field header <-> position
+			if err != nil {
+				return err
+			}
+			csvHeadersLabels[j] = inInnerFieldValue
+		}
+		if err := writer.Write(csvHeadersLabels); err != nil {
+			return err
+		}
+	}
+	writer.Flush()
+	return writer.Error()
+}
+
+func ensureStructOrPtr(t reflect.Type) error {
+	switch t.Kind() {
+	case reflect.Struct:
+		fallthrough
+	case reflect.Ptr:
+		return nil
+	}
+	return fmt.Errorf("cannot use " + t.String() + ", only struct or pointer supported")
+}
+
+// Check if the inType is an array or a slice
+func ensureInType(outType reflect.Type) error {
+	switch outType.Kind() {
+	case reflect.Slice:
+		fallthrough
+	case reflect.Array:
+		return nil
+	}
+	return fmt.Errorf("cannot use " + outType.String() + ", only slice or array supported")
+}
+
+// Check if the inInnerType is of type struct
+func ensureInInnerType(outInnerType reflect.Type) error {
+	switch outInnerType.Kind() {
+	case reflect.Struct:
+		return nil
+	}
+	return fmt.Errorf("cannot use " + outInnerType.String() + ", only struct
supported") +} + +func getInnerField(outInner reflect.Value, outInnerWasPointer bool, index []int) (string, error) { + oi := outInner + if outInnerWasPointer { + oi = outInner.Elem() + } + return getFieldAsString(oi.FieldByIndex(index)) +} diff --git a/vendor/github.com/gocarina/gocsv/encode_test.go b/vendor/github.com/gocarina/gocsv/encode_test.go new file mode 100644 index 00000000000..1bd9e12a37b --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/encode_test.go @@ -0,0 +1,214 @@ +package gocsv + +import ( + "bytes" + "encoding/csv" + "io" + "math" + "strconv" + "strings" + "testing" +) + +func assertLine(t *testing.T, expected, actual []string) { + if len(expected) != len(actual) { + t.Fatalf("line length mismatch between expected: %d and actual: %d", len(expected), len(actual)) + } + for i := range expected { + if expected[i] != actual[i] { + t.Fatalf("mismatch on field %d at line `%s`: %s != %s", i, expected, expected[i], actual[i]) + } + } +} + +func Test_writeTo(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + s := []Sample{ + {Foo: "f", Bar: 1, Baz: "baz", Frop: 0.1, Blah: &blah}, + {Foo: "e", Bar: 3, Baz: "b", Frop: 6.0 / 13, Blah: nil}, + } + if err := writeTo(csv.NewWriter(e.out), s); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 3 { + t.Fatalf("expected 3 lines, got %d", len(lines)) + } + assertLine(t, []string{"foo", "BAR", "Baz", "Quux", "Blah"}, lines[0]) + assertLine(t, []string{"f", "1", "baz", "0.1", "2"}, lines[1]) + assertLine(t, []string{"e", "3", "b", "0.46153846153846156", ""}, lines[2]) +} + +func Test_writeTo_multipleTags(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + s := []MultiTagSample{ + {Foo: "abc", Bar: 123}, + {Foo: "def", Bar: 234}, + } + if err := writeTo(csv.NewWriter(e.out), s); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 3 { + t.Fatalf("expected 3 lines, got %d", len(lines)) + } + // the first tag for each field is the encoding CSV header + assertLine(t, []string{"Baz", "BAR"}, lines[0]) + assertLine(t, []string{"abc", "123"}, lines[1]) + assertLine(t, []string{"def", "234"}, lines[2]) +} + +func Test_writeTo_embed(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + s := []EmbedSample{ + { + Qux: "aaa", + Sample: Sample{Foo: "f", Bar: 1, Baz: "baz", Frop: 0.2, Blah: &blah}, + Ignore: "shouldn't be marshalled", + Quux: "zzz", + Grault: math.Pi, + }, + } + if err := writeTo(csv.NewWriter(e.out), s); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"first", "foo", "BAR", "Baz", "Quux", "Blah", "garply", "last"}, lines[0]) + assertLine(t, []string{"aaa", "f", "1", "baz", "0.2", "2", "3.141592653589793", "zzz"}, lines[1]) +} + +func Test_writeTo_complex_embed(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + sfs := []SkipFieldSample{ + { + EmbedSample: EmbedSample{ + Qux: "aaa", + Sample: Sample{ + Foo: "bbb", + Bar: 111, + Baz: "ddd", + Frop: 1.2e22, + Blah: nil, + }, + Ignore: "eee", + Grault: 0.1, + Quux: "fff", + }, + MoreIgnore: "ggg", + Corge: "hhh", + }, + } + if err := writeTo(csv.NewWriter(e.out), sfs); err != nil { + t.Fatal(err) + } + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if 
len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"first", "foo", "BAR", "Baz", "Quux", "Blah", "garply", "last", "abc"}, lines[0]) + assertLine(t, []string{"aaa", "bbb", "111", "ddd", "12000000000000000000000", "", "0.1", "fff", "hhh"}, lines[1]) +} + +func Test_writeToChan(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + c := make(chan interface{}) + go func() { + for i := 0; i < 100; i++ { + v := Sample{Foo: "f", Bar: i, Baz: "baz" + strconv.Itoa(i), Frop: float64(i), Blah: nil} + c <- v + } + close(c) + }() + if err := MarshalChan(c, csv.NewWriter(e.out)); err != nil { + t.Fatal(err) + } + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 101 { + t.Fatalf("expected 100 lines, got %d", len(lines)) + } + for i, l := range lines { + if i == 0 { + assertLine(t, []string{"foo", "BAR", "Baz", "Quux", "Blah"}, l) + continue + } + assertLine(t, []string{"f", strconv.Itoa(i - 1), "baz" + strconv.Itoa(i-1), strconv.FormatFloat(float64(i-1), 'f', -1, 64), ""}, l) + } +} + +// TestRenamedTypes tests for marshaling functions on redefined basic types. +func TestRenamedTypesMarshal(t *testing.T) { + samples := []RenamedSample{ + {RenamedFloatUnmarshaler: 1.4, RenamedFloatDefault: 1.5}, + {RenamedFloatUnmarshaler: 2.3, RenamedFloatDefault: 2.4}, + } + + SetCSVWriter(func(out io.Writer) *csv.Writer { + csvout := csv.NewWriter(out) + csvout.Comma = ';' + return csvout + }) + // Switch back to default for tests executed after this + defer SetCSVWriter(DefaultCSVWriter) + + csvContent, err := MarshalString(&samples) + if err != nil { + t.Fatal(err) + } + if csvContent != "foo;bar\n1,4;1.5\n2,3;2.4\n" { + t.Fatalf("Error marshaling floats with , as separator. 
Expected \nfoo;bar\n1,4;1.5\n2,3;2.4\ngot:\n%v", csvContent)
+	}
+
+	// Test that errors raised by MarshalCSV are correctly reported
+	samples = []RenamedSample{
+		{RenamedFloatUnmarshaler: 4.2, RenamedFloatDefault: 1.5},
+	}
+	_, err = MarshalString(&samples)
+	if _, ok := err.(MarshalError); !ok {
+		t.Fatalf("Expected MarshalError, got %v", err)
+	}
+}
+
+func (rf *RenamedFloat64Unmarshaler) MarshalCSV() (csv string, err error) {
+	if *rf == RenamedFloat64Unmarshaler(4.2) {
+		return "", MarshalError{"Test error: Invalid float 4.2"}
+	}
+	csv = strconv.FormatFloat(float64(*rf), 'f', 1, 64)
+	csv = strings.Replace(csv, ".", ",", -1)
+	return csv, nil
+}
+
+type MarshalError struct {
+	msg string
+}
+
+func (e MarshalError) Error() string {
+	return e.msg
+}
diff --git a/vendor/github.com/gocarina/gocsv/reflect.go b/vendor/github.com/gocarina/gocsv/reflect.go
new file mode 100644
index 00000000000..e96fb57a8f3
--- /dev/null
+++ b/vendor/github.com/gocarina/gocsv/reflect.go
@@ -0,0 +1,104 @@
+package gocsv
+
+import (
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// --------------------------------------------------------------------------
+// Reflection helpers
+
+// fieldInfo is a struct field that should be mapped to a CSV column, or vice versa.
+// Each IndexChain element before the last is the index of the embedded struct field
+// that defines Key as a tag
+type structInfo struct {
+	Fields []fieldInfo
+}
+
+type fieldInfo struct {
+	keys       []string
+	IndexChain []int
+}
+
+func (f fieldInfo) getFirstKey() string {
+	return f.keys[0]
+}
+
+func (f fieldInfo) matchesKey(key string) bool {
+	for _, k := range f.keys {
+		if key == k {
+			return true
+		}
+	}
+	return false
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+func getStructInfo(rType reflect.Type) *structInfo {
+	structMapMutex.RLock()
+	stInfo, ok := structMap[rType]
+	structMapMutex.RUnlock()
+	if ok {
+		return stInfo
+	}
+	fieldsList := getFieldInfos(rType, []int{})
+	stInfo = &structInfo{fieldsList}
+	structMapMutex.Lock()
+	structMap[rType] = stInfo // Cache the computed info so subsequent lookups hit the fast path above
+	structMapMutex.Unlock()
+	return stInfo
+}
+
+func getFieldInfos(rType reflect.Type, parentIndexChain []int) []fieldInfo {
+	fieldsCount := rType.NumField()
+	fieldsList := make([]fieldInfo, 0, fieldsCount)
+	for i := 0; i < fieldsCount; i++ {
+		field := rType.Field(i)
+		if field.PkgPath != "" {
+			continue
+		}
+		indexChain := append(parentIndexChain, i)
+		// if the field is an embedded struct, create a fieldInfo for each of its fields
+		if field.Anonymous && field.Type.Kind() == reflect.Struct {
+			fieldsList = append(fieldsList, getFieldInfos(field.Type, indexChain)...)
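+			// The parent index chain is extended rather than reset so that
+			// setInnerField/getInnerField can reach nested fields through
+			// reflect's FieldByIndex.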
+ continue + } + fieldInfo := fieldInfo{IndexChain: indexChain} + fieldTag := field.Tag.Get("csv") + fieldTags := strings.Split(fieldTag, TagSeparator) + filteredTags := []string{} + for _, fieldTagEntry := range fieldTags { + if fieldTagEntry != "omitempty" { + filteredTags = append(filteredTags, fieldTagEntry) + } + } + + if len(filteredTags) == 1 && filteredTags[0] == "-" { + continue + } else if len(filteredTags) > 0 && filteredTags[0] != "" { + fieldInfo.keys = filteredTags + } else { + fieldInfo.keys = []string{field.Name} + } + fieldsList = append(fieldsList, fieldInfo) + } + return fieldsList +} + +func getConcreteContainerInnerType(in reflect.Type) (inInnerWasPointer bool, inInnerType reflect.Type) { + inInnerType = in.Elem() + inInnerWasPointer = false + if inInnerType.Kind() == reflect.Ptr { + inInnerWasPointer = true + inInnerType = inInnerType.Elem() + } + return inInnerWasPointer, inInnerType +} + +func getConcreteReflectValueAndType(in interface{}) (reflect.Value, reflect.Type) { + value := reflect.ValueOf(in) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value, value.Type() +} diff --git a/vendor/github.com/gocarina/gocsv/sample_structs_test.go b/vendor/github.com/gocarina/gocsv/sample_structs_test.go new file mode 100644 index 00000000000..90dccbc0430 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/sample_structs_test.go @@ -0,0 +1,42 @@ +package gocsv + +type Sample struct { + Foo string `csv:"foo"` + Bar int `csv:"BAR"` + Baz string `csv:"Baz"` + Frop float64 `csv:"Quux"` + Blah *int `csv:"Blah"` +} + +type EmbedSample struct { + Qux string `csv:"first"` + Sample + Ignore string `csv:"-"` + Grault float64 `csv:"garply"` + Quux string `csv:"last"` +} + +type SkipFieldSample struct { + EmbedSample + MoreIgnore string `csv:"-"` + Corge string `csv:"abc"` +} + +// Testtype for unmarshal/marshal functions on renamed basic types +type RenamedFloat64Unmarshaler float64 +type RenamedFloat64Default float64 + +type RenamedSample struct { + RenamedFloatUnmarshaler RenamedFloat64Unmarshaler `csv:"foo"` + RenamedFloatDefault RenamedFloat64Default `csv:"bar"` +} + +type MultiTagSample struct { + Foo string `csv:"Baz,foo"` + Bar int `csv:"BAR"` +} + +type TagSeparatorSample struct { + Foo string `csv:"Baz|foo"` + Bar int `csv:"BAR"` +} diff --git a/vendor/github.com/gocarina/gocsv/types.go b/vendor/github.com/gocarina/gocsv/types.go new file mode 100644 index 00000000000..3af1ad30a11 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/types.go @@ -0,0 +1,430 @@ +package gocsv + +import ( + "encoding" + "fmt" + "reflect" + "strconv" + "strings" +) + +// -------------------------------------------------------------------------- +// Conversion interfaces + +// TypeMarshaller is implemented by any value that has a MarshalCSV method +// This converter is used to convert the value to it string representation +type TypeMarshaller interface { + MarshalCSV() (string, error) +} + +// Stringer is implemented by any value that has a String method +// This converter is used to convert the value to it string representation +// This converter will be used if your value does not implement TypeMarshaller +type Stringer interface { + String() string +} + +// TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method +// This converter is used to convert a string to your value representation of that string +type TypeUnmarshaller interface { + UnmarshalCSV(string) error +} + +// NoUnmarshalFuncError is the custom error type to be raised in case there is no 
unmarshal function defined on type +type NoUnmarshalFuncError struct { + msg string +} + +func (e NoUnmarshalFuncError) Error() string { + return e.msg +} + +// NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type +type NoMarshalFuncError struct { + msg string +} + +func (e NoMarshalFuncError) Error() string { + return e.msg +} + +var ( + stringerType = reflect.TypeOf((*Stringer)(nil)).Elem() + marshallerType = reflect.TypeOf((*TypeMarshaller)(nil)).Elem() + unMarshallerType = reflect.TypeOf((*TypeUnmarshaller)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnMarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// -------------------------------------------------------------------------- +// Conversion helpers + +func toString(in interface{}) (string, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + return inValue.String(), nil + case reflect.Bool: + b := inValue.Bool() + if b { + return "true", nil + } + return "false", nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", inValue.Uint()), nil + case reflect.Float32: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil + case reflect.Float64: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil + } + return "", fmt.Errorf("No known conversion from " + inValue.Type().String() + " to string") +} + +func toBool(in interface{}) (bool, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := inValue.String() + switch s { + case "yes": + return true, nil + case "no", "": + return false, nil + default: + return strconv.ParseBool(s) + } + case reflect.Bool: + return inValue.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := inValue.Int() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := inValue.Uint() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Float32, reflect.Float64: + f := inValue.Float() + if f != 0 { + return true, nil + } + return false, nil + } + return false, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to bool") +} + +func toInt(in interface{}) (int64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseInt(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return inValue.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return int64(inValue.Float()), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to int") +} + +func toUint(in interface{}) (uint64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + + // support the float input + if strings.Contains(s, ".") { + f, err := 
strconv.ParseFloat(s, 64) + if err != nil { + return 0, err + } + return uint64(f), nil + } + return strconv.ParseUint(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return uint64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return inValue.Uint(), nil + case reflect.Float32, reflect.Float64: + return uint64(inValue.Float()), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to uint") +} + +func toFloat(in interface{}) (float64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseFloat(s, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return inValue.Float(), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to float") +} + +func setField(field reflect.Value, value string) error { + switch field.Interface().(type) { + case string: + s, err := toString(value) + if err != nil { + return err + } + field.SetString(s) + case bool: + b, err := toBool(value) + if err != nil { + return err + } + field.SetBool(b) + case int, int8, int16, int32, int64: + i, err := toInt(value) + if err != nil { + return err + } + field.SetInt(i) + case uint, uint8, uint16, uint32, uint64: + ui, err := toUint(value) + if err != nil { + return err + } + field.SetUint(ui) + case float32, float64: + f, err := toFloat(value) + if err != nil { + return err + } + field.SetFloat(f) + default: + // Not a native type, check for unmarshal method + if err := unmarshall(field, value); err != nil { + if _, ok := err.(NoUnmarshalFuncError); !ok { + return err + } + // Could not unmarshal, check for kind, e.g. 
renamed type from basic type + switch field.Kind() { + case reflect.String: + s, err := toString(value) + if err != nil { + return err + } + field.SetString(s) + case reflect.Bool: + b, err := toBool(value) + if err != nil { + return err + } + field.SetBool(b) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := toInt(value) + if err != nil { + return err + } + field.SetInt(i) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ui, err := toUint(value) + if err != nil { + return err + } + field.SetUint(ui) + case reflect.Float32, reflect.Float64: + f, err := toFloat(value) + if err != nil { + return err + } + field.SetFloat(f) + default: + return err + } + } else { + return nil + } + } + return nil +} + +func getFieldAsString(field reflect.Value) (str string, err error) { + switch field.Kind() { + case reflect.Interface: + case reflect.Ptr: + if field.IsNil() { + return "", nil + } + return getFieldAsString(field.Elem()) + default: + // Check if field is go native type + switch field.Interface().(type) { + case string: + return field.String(), nil + case bool: + str, err = toString(field.Bool()) + if err != nil { + return str, err + } + case int, int8, int16, int32, int64: + str, err = toString(field.Int()) + if err != nil { + return str, err + } + case uint, uint8, uint16, uint32, uint64: + str, err = toString(field.Uint()) + if err != nil { + return str, err + } + case float32: + str, err = toString(float32(field.Float())) + if err != nil { + return str, err + } + case float64: + str, err = toString(field.Float()) + if err != nil { + return str, err + } + default: + // Not a native type, check for marshal method + str, err = marshall(field) + if err != nil { + if _, ok := err.(NoMarshalFuncError); !ok { + return str, err + } + // If not marshal method, is field compatible with/renamed from native type + switch field.Kind() { + case reflect.String: + return field.String(), nil + case reflect.Bool: + str, err = toString(field.Bool()) + if err != nil { + return str, err + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + str, err = toString(field.Int()) + if err != nil { + return str, err + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + str, err = toString(field.Uint()) + if err != nil { + return str, err + } + case reflect.Float32: + str, err = toString(float32(field.Float())) + if err != nil { + return str, err + } + case reflect.Float64: + str, err = toString(field.Float()) + if err != nil { + return str, err + } + } + } else { + return str, nil + } + } + } + return str, nil +} + +// -------------------------------------------------------------------------- +// Un/serializations helpers + +func unmarshall(field reflect.Value, value string) error { + dupField := field + unMarshallIt := func(finalField reflect.Value) error { + if finalField.CanInterface() && finalField.Type().Implements(unMarshallerType) { + if err := finalField.Interface().(TypeUnmarshaller).UnmarshalCSV(value); err != nil { + return err + } + return nil + } else if finalField.CanInterface() && finalField.Type().Implements(textUnMarshalerType) { // Otherwise try to use TextMarshaller + if err := finalField.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil { + return err + } + return nil + } + + return NoUnmarshalFuncError{"No known conversion from string to " + field.Type().String() + ", " + field.Type().String() + " does not implements 
TypeUnmarshaller"} + } + for dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr { + if dupField.IsNil() { + dupField = reflect.New(field.Type().Elem()) + field.Set(dupField) + return unMarshallIt(dupField) + break + } + dupField = dupField.Elem() + } + if dupField.CanAddr() { + return unMarshallIt(dupField.Addr()) + } + return NoUnmarshalFuncError{"No known conversion from string to " + field.Type().String() + ", " + field.Type().String() + " does not implements TypeUnmarshaller"} +} + +func marshall(field reflect.Value) (value string, err error) { + dupField := field + marshallIt := func(finalField reflect.Value) (string, error) { + if finalField.CanInterface() && finalField.Type().Implements(marshallerType) { // Use TypeMarshaller when possible + return finalField.Interface().(TypeMarshaller).MarshalCSV() + } else if finalField.CanInterface() && finalField.Type().Implements(stringerType) { // Otherwise try to use Stringer + return finalField.Interface().(Stringer).String(), nil + } else if finalField.CanInterface() && finalField.Type().Implements(textMarshalerType) { // Otherwise try to use TextMarshaller + text, err := finalField.Interface().(encoding.TextMarshaler).MarshalText() + return string(text), err + } + + return value, NoMarshalFuncError{"No known conversion from " + field.Type().String() + " to string, " + field.Type().String() + " does not implements TypeMarshaller nor Stringer"} + } + for dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr { + if dupField.IsNil() { + return value, nil + } + dupField = dupField.Elem() + } + if dupField.CanAddr() { + return marshallIt(dupField.Addr()) + } + return value, NoMarshalFuncError{"No known conversion from " + field.Type().String() + " to string, " + field.Type().String() + " does not implements TypeMarshaller nor Stringer"} +} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 00000000000..f9c841a51e0 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 00000000000..659d6885fc7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 00000000000..115ae67c115 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,154 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
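+//
+// A brief sketch of a direct call (hypothetical pass-through hook, not
+// part of this file):
+//
+//	hook := func(f, t reflect.Kind, data interface{}) (interface{}, error) {
+//		return data, nil
+//	}
+//	out, err := DecodeHookExec(hook, reflect.TypeOf(""), reflect.TypeOf(0), "5")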
+func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
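+//
+// Typically wired up through DecoderConfig; a sketch, assuming result is
+// a pointer to a struct with a time.Duration field:
+//
+//	config := &DecoderConfig{
+//		DecodeHook: StringToTimeDurationHookFunc(),
+//		Result:     &result,
+//	}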
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go new file mode 100644 index 00000000000..53289afcfbf --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -0,0 +1,229 @@ +package mapstructure + +import ( + "errors" + "reflect" + "testing" + "time" +) + +func TestComposeDecodeHookFunc(t *testing.T) { + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "foo", nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "bar", nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + result, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if result.(string) != "foobar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestComposeDecodeHookFunc_err(t *testing.T) { + f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + return nil, errors.New("foo") + } + + f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + panic("NOPE") + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) + if err.Error() != "foo" { + t.Fatalf("bad: %s", err) + } +} + +func TestComposeDecodeHookFunc_kinds(t *testing.T) { + var f2From reflect.Kind + + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return int(42), nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + f2From = f + return data, nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if f2From != reflect.Int { + t.Fatalf("bad: %#v", f2From) + } +} + +func TestStringToSliceHookFunc(t *testing.T) { + f := StringToSliceHookFunc(",") + + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {sliceType, sliceType, 42, 42, false}, + {strType, strType, 42, 42, false}, + { + strType, + sliceType, + "foo,bar,baz", + []string{"foo", "bar", "baz"}, + false, + }, + { + strType, + sliceType, + "", + []string{}, + false, + }, + } + + for i, 
tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestStringToTimeDurationHookFunc(t *testing.T) { + f := StringToTimeDurationHookFunc() + + strType := reflect.TypeOf("") + timeType := reflect.TypeOf(time.Duration(5)) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {strType, timeType, "5s", 5 * time.Second, false}, + {strType, timeType, "5", time.Duration(0), true}, + {strType, strType, "5", "5", false}, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestWeaklyTypedHook(t *testing.T) { + var f DecodeHookFunc = WeaklyTypedHook + + boolType := reflect.TypeOf(true) + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + // TO STRING + { + boolType, + strType, + false, + "0", + false, + }, + + { + boolType, + strType, + true, + "1", + false, + }, + + { + reflect.TypeOf(float32(1)), + strType, + float32(7), + "7", + false, + }, + + { + reflect.TypeOf(int(1)), + strType, + int(7), + "7", + false, + }, + + { + sliceType, + strType, + []uint8("foo"), + "foo", + false, + }, + + { + reflect.TypeOf(uint(1)), + strType, + uint(7), + "7", + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 00000000000..47a99e5af3f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) 
+ default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 00000000000..a554e799bb7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,790 @@ +// The mapstructure package exposes functionality to convert an +// abitrary map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. 
This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. 
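+		// DecodeHookExec accepts either hook form described on
+		// DecodeHookFunc: a DecodeHookFuncType receives the full
+		// reflect.Types passed here, while a DecodeHookFuncKind
+		// only sees their Kinds.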
+ var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := 
strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": 
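+		// json.Number is declared as a string type, so it is never
+		// matched by kind above; it is detected by package path and
+		// type name instead, then parsed with its Float64 method.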
+ jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // In weak mode, we accept a slice of maps as an input... + if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } + } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + realVal := reflect.New(valElemType) + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. 
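+	// Each element is then decoded individually through d.decode, so
+	// nested structs, maps, and further slices recurse naturally.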
+ valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + if fieldType.Anonymous { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) + continue + } + } + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
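+			// This fallback compares every key with strings.EqualFold,
+			// so it is linear in the map size; the exact MapIndex
+			// lookup above keeps the common case fast.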
+ for dataValKey, _ := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey, _ := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey, _ := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go new file mode 100644 index 00000000000..41d2a41f754 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding/json" + "testing" +) + +func Benchmark_Decode(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the +// given Go native structure pointed to by v. v must be a pointer to a struct. +func decodeViaJSON(data interface{}, v interface{}) error { + // Perform the task by simply marshalling the input into JSON, + // then unmarshalling it into target native Go struct. 
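+	// This helper exists only as a baseline for Benchmark_DecodeViaJSON,
+	// contrasting a JSON round trip with reflection-based decoding.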
+ b, err := json.Marshal(data) + if err != nil { + return err + } + return json.Unmarshal(b, v) +} + +func Benchmark_DecodeViaJSON(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + decodeViaJSON(input, &result) + } +} + +func Benchmark_DecodeBasic(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeTypeConversion(b *testing.B) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + var resultStrict TypeConversionResult + for i := 0; i < b.N; i++ { + Decode(input, &resultStrict) + } +} + +func Benchmark_DecodeMap(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeMapOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSlice(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + var result Slice + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSliceOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. 
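+	// The inline comments on the literal below name the weak
+	// conversions exercised; each matches an entry in the
+	// WeaklyTypedInput list on DecoderConfig.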
+ input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadata(b *testing.B) { + type Person struct { + Name string + Age int + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadataEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + b.Fatalf("err: %s", err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeTagged(b *testing.B) { + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go new file mode 100644 index 00000000000..7054f1ac9ab --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -0,0 +1,47 @@ +package mapstructure + +import "testing" + +// GH-1 +func TestDecode_NilValue(t *testing.T) { + input := map[string]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} + +// GH-10 +func TestDecode_mapInterfaceInterface(t *testing.T) { + input := map[interface{}]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go new file mode 100644 index 00000000000..f17c214a8a9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go @@ -0,0 +1,203 @@ +package mapstructure + +import ( + "fmt" +) + +func ExampleDecode() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. 
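+	// The convenience Decode wrapper uses default settings: weak
+	// typing off, no metadata tracking, and the "mapstructure"
+	// tag name.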
+ input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}} +} + +func ExampleDecode_errors() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": 123, + "age": "bad value", + "emails": []int{1, 2, 3}, + } + + var result Person + err := Decode(input, &result) + if err == nil { + panic("should have an error") + } + + fmt.Println(err.Error()) + // Output: + // 5 error(s) decoding: + // + // * 'Age' expected type 'int', got unconvertible type 'string' + // * 'Emails[0]' expected type 'string', got unconvertible type 'int' + // * 'Emails[1]' expected type 'string', got unconvertible type 'int' + // * 'Emails[2]' expected type 'string', got unconvertible type 'int' + // * 'Name' expected type 'string', got unconvertible type 'int' +} + +func ExampleDecode_metadata() { + type Person struct { + Name string + Age int + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + // For metadata, we make a more advanced DecoderConfig so we can + // more finely configure the decoder that is used. In this case, we + // just tell the decoder we want to track metadata. + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + if err := decoder.Decode(input); err != nil { + panic(err) + } + + fmt.Printf("Unused keys: %#v", md.Unused) + // Output: + // Unused keys: []string{"email"} +} + +func ExampleDecode_weaklyTypedInput() { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. + input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + err = decoder.Decode(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}} +} + +func ExampleDecode_tags() { + // Note that the mapstructure tags defined in the struct type + // can indicate which fields the values are mapped to. 
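+	// Only the part of the tag before the first comma is used as the
+	// key name; later parts carry options such as "squash".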
+ type Person struct { + Name string `mapstructure:"person_name"` + Age int `mapstructure:"person_age"` + } + + input := map[string]interface{}{ + "person_name": "Mitchell", + "person_age": 91, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91} +} + +func ExampleDecode_embeddedStruct() { + // Squashing multiple embedded structs is allowed using the squash tag. + // This is demonstrated by creating a composite struct of multiple types + // and decoding into it. In this case, a person can carry with it both + // a Family and a Location, as well as their own FirstName. + type Family struct { + LastName string + } + type Location struct { + City string + } + type Person struct { + Family `mapstructure:",squash"` + Location `mapstructure:",squash"` + FirstName string + } + + input := map[string]interface{}{ + "FirstName": "Mitchell", + "LastName": "Hashimoto", + "City": "San Francisco", + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City) + // Output: + // Mitchell Hashimoto, San Francisco +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go new file mode 100644 index 00000000000..ea219241454 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -0,0 +1,1107 @@ +package mapstructure + +import ( + "encoding/json" + "io" + "reflect" + "sort" + "strings" + "testing" +) + +type Basic struct { + Vstring string + Vint int + Vuint uint + Vbool bool + Vfloat float64 + Vextra string + vsilent bool + Vdata interface{} + VjsonInt int + VjsonFloat float64 + VjsonNumber json.Number +} + +type BasicSquash struct { + Test Basic `mapstructure:",squash"` +} + +type Embedded struct { + Basic + Vunique string +} + +type EmbeddedPointer struct { + *Basic + Vunique string +} + +type EmbeddedSquash struct { + Basic `mapstructure:",squash"` + Vunique string +} + +type SquashOnNonStructType struct { + InvalidSquashType int `mapstructure:",squash"` +} + +type Map struct { + Vfoo string + Vother map[string]string +} + +type MapOfStruct struct { + Value map[string]Basic +} + +type Nested struct { + Vfoo string + Vbar Basic +} + +type NestedPointer struct { + Vfoo string + Vbar *Basic +} + +type NilInterface struct { + W io.Writer +} + +type Slice struct { + Vfoo string + Vbar []string +} + +type SliceOfStruct struct { + Value []Basic +} + +type Tagged struct { + Extra string `mapstructure:"bar,what,what"` + Value string `mapstructure:"foo"` +} + +type TypeConversionResult struct { + IntToFloat float32 + IntToUint uint + IntToBool bool + IntToString string + UintToInt int + UintToFloat float32 + UintToBool bool + UintToString string + BoolToInt int + BoolToUint uint + BoolToFloat float32 + BoolToString string + FloatToInt int + FloatToUint uint + FloatToBool bool + FloatToString string + SliceUint8ToString string + StringToInt int + StringToUint uint + StringToBool bool + StringToFloat float32 + SliceToMap map[string]interface{} + MapToSlice []interface{} +} + +func TestBasicTypes(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + "vjsonInt": json.Number("1234"), + "vjsonFloat": json.Number("1234.5"), + "vjsonNumber": 
json.Number("1234.5"), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Errorf("got an err: %s", err.Error()) + t.FailNow() + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vint) + } + + if result.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vuint) + } + + if result.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbool) + } + + if result.Vfloat != 42.42 { + t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) + } + + if result.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vextra) + } + + if result.vsilent != false { + t.Error("vsilent should not be set, it is unexported") + } + + if result.Vdata != 42 { + t.Error("vdata should be valid") + } + + if result.VjsonInt != 1234 { + t.Errorf("vjsonint value should be 1234: %#v", result.VjsonInt) + } + + if result.VjsonFloat != 1234.5 { + t.Errorf("vjsonfloat value should be 1234.5: %#v", result.VjsonFloat) + } + + if !reflect.DeepEqual(result.VjsonNumber, json.Number("1234.5")) { + t.Errorf("vjsonnumber value should be '1234.5': %T, %#v", result.VjsonNumber, result.VjsonNumber) + } +} + +func TestBasic_IntWithFloat(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": float64(42), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } +} + +func TestBasic_Merge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": 42, + } + + var result Basic + result.Vuint = 100 + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + expected := Basic{ + Vint: 42, + Vuint: 100, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_BasicSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + } + + var result BasicSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Test.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring) + } +} + +func TestDecode_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "innerfoo" { + t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedPointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result EmbeddedPointer + err := Decode(input, &result) + if err == nil { + t.Fatal("should get error") + } +} + +func TestDecode_EmbeddedSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var result EmbeddedSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique 
value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_SquashOnNonStructType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "InvalidSquashType": 42, + } + + var result SquashOnNonStructType + err := Decode(input, &result) + if err == nil { + t.Fatal("unexpected success decoding invalid squash field type") + } else if !strings.Contains(err.Error(), "unsupported type for squash") { + t.Fatalf("unexpected error message for invalid squash field type: %s", err) + } +} + +func TestDecode_DecodeHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) { + if from == reflect.String && to != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_DecodeHookType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) { + if from.Kind() == reflect.String && + to.Kind() != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_Nil(t *testing.T) { + t.Parallel() + + var input interface{} = nil + result := Basic{ + Vstring: "foo", + } + + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result.Vstring != "foo" { + t.Fatalf("bad: %#v", result.Vstring) + } +} + +func TestDecode_NilInterfaceHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "w": "", + } + + decodeHook := func(f, t reflect.Type, v interface{}) (interface{}, error) { + if t.String() == "io.Writer" { + return nil, nil + } + + return v, nil + } + + var result NilInterface + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.W != nil { + t.Errorf("W should be nil: %#v", result.W) + } +} + +func TestDecode_NonStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + + var result map[string]string + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["foo"] != "bar" { + t.Fatal("foo is not bar") + } +} + +func TestDecode_StructMatch(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vbar": Basic{ + Vstring: "foo", + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("bad: %#v", result) + } +} + +func TestDecode_TypeConversion(t *testing.T) { + input := map[string]interface{}{ + "IntToFloat": 42, + 
"IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "SliceUint8ToString": []uint8("foo"), + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + expectedResultStrict := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + UintToInt: 42, + UintToFloat: 42, + BoolToInt: 0, + BoolToUint: 0, + BoolToFloat: 0, + FloatToInt: 42, + FloatToUint: 42, + } + + expectedResultWeak := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + IntToBool: true, + IntToString: "42", + UintToInt: 42, + UintToFloat: 42, + UintToBool: true, + UintToString: "42", + BoolToInt: 1, + BoolToUint: 1, + BoolToFloat: 1, + BoolToString: "1", + FloatToInt: 42, + FloatToUint: 42, + FloatToBool: true, + FloatToString: "42.42", + SliceUint8ToString: "foo", + StringToInt: 42, + StringToUint: 42, + StringToBool: true, + StringToFloat: 42.42, + SliceToMap: map[string]interface{}{}, + MapToSlice: []interface{}{}, + } + + // Test strict type conversion + var resultStrict TypeConversionResult + err := Decode(input, &resultStrict) + if err == nil { + t.Errorf("should return an error") + } + if !reflect.DeepEqual(resultStrict, expectedResultStrict) { + t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) + } + + // Test weak type conversion + var decoder *Decoder + var resultWeak TypeConversionResult + + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &resultWeak, + } + + decoder, err = NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if !reflect.DeepEqual(resultWeak, expectedResultWeak) { + t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) + } +} + +func TestDecoder_ErrorUnused(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "hello", + "foo": "bar", + } + + var result Basic + config := &DecoderConfig{ + ErrorUnused: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err == nil { + t.Fatal("expected error") + } +} + +func TestMap(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vother == nil { + t.Fatal("vother should not be nil") + } + + if len(result.Vother) != 2 { + t.Error("vother should have two items") + } + + if result.Vother["foo"] != "foo" { + t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) + } + + if result.Vother["bar"] != "bar" { + t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) + } +} + +func TestMapMerge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + result.Vother = map[string]string{"hello": "world"} + err := 
Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + expected := map[string]string{ + "foo": "foo", + "bar": "bar", + "hello": "world", + } + if !reflect.DeepEqual(result.Vother, expected) { + t.Errorf("bad: %#v", result.Vother) + } +} + +func TestMapOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Value == nil { + t.Fatal("value should not be nil") + } + + if len(result.Value) != 2 { + t.Error("value should have two items") + } + + if result.Value["foo"].Vstring != "one" { + t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) + } + + if result.Value["bar"].Vstring != "two" { + t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) + } +} + +func TestNestedType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestNestedTypePointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result NestedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestSlice(t *testing.T) { + t.Parallel() + + inputStringSlice := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + inputStringSlicePointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[]string{"foo", "bar", "baz"}, + } + + outputStringSlice := &Slice{ + "foo", + []string{"foo", "bar", "baz"}, + } + + testSliceInput(t, inputStringSlice, outputStringSlice) + testSliceInput(t, inputStringSlicePointer, outputStringSlice) +} + +func TestInvalidSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Slice{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestSliceOfStruct(t *testing.T) 
{ + t.Parallel() + + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestSliceToMap(t *testing.T) { + t.Parallel() + + input := []map[string]interface{}{ + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "bar": "baz", + }, + } + + var result map[string]interface{} + err := WeakDecode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("bad: %#v", result) + } +} + +func TestInvalidType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": 42, + } + + var result Basic + err := Decode(input, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok := err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegIntUint := map[string]interface{}{ + "vuint": -42, + } + + err = Decode(inputNegIntUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegFloatUint := map[string]interface{}{ + "vuint": -42.0, + } + + err = Decode(inputNegFloatUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestMetadata_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = 
decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vstring", "Vunique"} + + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestNonPtrValue(t *testing.T) { + t.Parallel() + + err := Decode(map[string]interface{}{}, Basic{}) + if err == nil { + t.Fatal("error should exist") + } + + if err.Error() != "result must be a pointer" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestTagged(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + err := Decode(input, &result) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if result.Value != "bar" { + t.Errorf("value should be 'bar', got: %#v", result.Value) + } + + if result.Extra != "value" { + t.Errorf("extra should be 'value', got: %#v", result.Extra) + } +} + +func TestWeakDecode(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + } + + var result struct { + Foo int + Bar string + } + + if err := WeakDecode(input, &result); err != nil { + t.Fatalf("err: %s", err) + } + if result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } +} + +func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { + var result Slice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == nil { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 00000000000..64634802f58 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,19 @@ +{ + "comment": "", + "ignore": "", + "package": [ + { + "checksumSHA1": "Y2M44k2lygN97UGTFDyiQJLabDU=", + "path": "github.com/gocarina/gocsv", + "revision": "80ac68b8d188bc11f9bc83b372a83bc65d4e5cde", + "revisionTime": "2016-08-03T06:53:29Z" + }, + { + "checksumSHA1": "Z0I4guD8AejM1hB3ltS/pTS60nQ=", + "path": "github.com/mitchellh/mapstructure", + "revision": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1", + "revisionTime": "2016-08-08T18:12:53Z" + } + ], + "rootPath": "github.com/elastic/beats" +} From 6c0eb6f5df8e6844fd495484f670d6e561428596 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Sat, 27 Aug 2016 13:28:35 -0400 Subject: [PATCH 10/19] Fixed errors & small changes --- metricbeat/.gitignore | 1 + metricbeat/module/haproxy/_meta/Dockerfile | 2 +- metricbeat/module/haproxy/haproxy.go | 18 ++-- .../module/haproxy/info/_meta/fields.yml | 94 ++++++++++--------- metricbeat/module/haproxy/info/data.go | 16 +++- metricbeat/module/haproxy/info/info.go | 4 +- 6 files changed, 76 insertions(+), 59 deletions(-) diff --git a/metricbeat/.gitignore b/metricbeat/.gitignore index 5a4866e0638..fa460c39290 100644 --- a/metricbeat/.gitignore +++ b/metricbeat/.gitignore @@ -3,3 +3,4 @@ build /metricbeat /metricbeat.test /docs/html_docs 
+/logs/* \ No newline at end of file diff --git a/metricbeat/module/haproxy/_meta/Dockerfile b/metricbeat/module/haproxy/_meta/Dockerfile index 6fca1419626..4efd57ec6cb 100644 --- a/metricbeat/module/haproxy/_meta/Dockerfile +++ b/metricbeat/module/haproxy/_meta/Dockerfile @@ -1,2 +1,2 @@ FROM haproxy:1.6 -COPY haproxy.conf /usr/local/etc/haproxy/haproxy.cfg \ No newline at end of file +COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg \ No newline at end of file diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go index 62e68598cb6..adf352d773c 100644 --- a/metricbeat/module/haproxy/haproxy.go +++ b/metricbeat/module/haproxy/haproxy.go @@ -132,7 +132,7 @@ type Info struct { // Client is an instance of the HAProxy client type Client struct { - connection net.Conn + //connection net.Conn Address string ProtoScheme string } @@ -163,16 +163,17 @@ func (c *Client) run(cmd string) (*bytes.Buffer, error) { if err != nil { return response, err } - c.connection = conn + //c.connection = conn - defer c.connection.Close() + //defer c.connection.Close() + defer conn.Close() - _, err = c.connection.Write([]byte(cmd + "\n")) + _, err = conn.Write([]byte(cmd + "\n")) if err != nil { return response, err } - _, err = io.Copy(response, c.connection) + _, err = io.Copy(response, conn) if err != nil { return response, err } @@ -185,13 +186,14 @@ func (c *Client) run(cmd string) (*bytes.Buffer, error) { } // GetStat returns the result from the 'show stat' command -func (c *Client) GetStat() (statRes []*Stat, err error) { +func (c *Client) GetStat() ([]*Stat, error) { runResult, err := c.run("show stat") if err != nil { return nil, err } + var statRes []*Stat csvReader := csv.NewReader(runResult) csvReader.TrailingComma = true @@ -205,7 +207,7 @@ func (c *Client) GetStat() (statRes []*Stat, err error) { } // GetInfo returns the result from the 'show stat' command -func (c *Client) GetInfo() (infoRes *Info, err error) { +func (c *Client) GetInfo() (*Info, error) { res, err := c.run("show info") if err != nil { @@ -234,7 +236,7 @@ func (c *Client) GetInfo() (infoRes *Info, err error) { var result *Info err := mapstructure.Decode(resultMap, &result) if err != nil { - panic(err) + return nil, err } return result, nil } diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml index 031c2adc8e7..5301ffbac64 100644 --- a/metricbeat/module/haproxy/info/_meta/fields.yml +++ b/metricbeat/module/haproxy/info/_meta/fields.yml @@ -4,191 +4,193 @@ General infomration collected on HAProxy process fields: - name: nb_proc - type: intger + type: integer description: > Number of processes - name: process_num - type: intger + type: integer description: > Process number - name: pid - type: intger + type: integer description: > Process ID - name: uptime_sec - type: intger + type: integer description: > Current uptime in seconds - name: mem_max_mb - type: intger + type: integer + format: bytes description: > Max number of memory usage in MB - name: ulimit_n - type: intger + type: integer description: > Max number of open files for process - name: max_sock - type: intger + type: integer description: > - name: max_conn - type: intger + type: integer description: > - name: hard_max_conn - type: intger + type: integer description: > - name: curr_conns - type: intger + type: integer description: > - name: cum_conns - type: intger + type: integer description: > - name: cum_req - type: intger + type: integer description: > - name: max_ssl_conns 
- type: intger + type: integer description: > - name: curr_ssl_conns - type: intger + type: integer description: > - name: cum_ssl_conns - type: intger + type: integer description: > - name: max_pipes - type: intger + type: integer description: > - name: pipes_used - type: intger + type: integer description: > - name: pipes_free - type: intger + type: integer description: > - name: conn_rate - type: intger + type: integer description: > - name: conn_rate_limit - type: intger + type: integer description: > - name: max_conn_rate - type: intger + type: integer description: > - name: sess_rate - type: intger + type: integer description: > - name: sess_rate_limit - type: intger + type: integer description: > - name: max_sess_rate - type: intger + type: integer description: > - name: ssl_rate - type: intger + type: integer description: > - name: ssl_rate_limit - type: intger + type: integer description: > - name: max_ssl_rate - type: intger + type: integer description: > - name: ssl_frontend_key_rate - type: intger + type: integer description: > - name: ssl_frontend_max_key_rate - type: intger + type: integer description: > - name: ssl_frontend_session_reuse_pct - type: intger + type: integer description: > - name: ssl_babckend_key_rate - type: intger + type: integer description: > - name: ssl_frontend_key_rate - type: intger + type: integer description: > - name: ssl_frontend_max_key_rate - type: intger + type: integer description: > - name: ssl_frontend_session_reuse_pct - type: intger + type: integer description: > - name: ssl_babckend_key_rate - type: intger + type: integer description: > - name: ssl_backend_max_key_rate - type: intger + type: integer description: > - name: ssl_cached_lookups - type: intger + type: integer description: > - name: ssl_cache_misses - type: intger + type: integer description: > - name: compress_bps_in - type: intger + type: integer description: > - name: compress_bps_out - type: intger + type: integer description: > - name: compress_bps_rate_limit - type: intger + type: integer description: > - name: zlib_mem_usage - type: intger + type: integer description: > - name: max_zlib_mem_usage - type: intger + type: integer description: > - name: tasks - type: intger + type: integer description: > - name: run_queue - type: intger + type: integer description: > - name: idle_pct - type: intger + type: scaled_float + format: percent description: > diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go index 3df90664b04..c3dce442bca 100644 --- a/metricbeat/module/haproxy/info/data.go +++ b/metricbeat/module/haproxy/info/data.go @@ -6,6 +6,7 @@ import ( s "github.com/elastic/beats/metricbeat/schema" c "github.com/elastic/beats/metricbeat/schema/mapstrstr" "reflect" + "strconv" "strings" ) @@ -52,7 +53,7 @@ var ( "max_zlib_mem_usage": c.Int("MaxZlibMemUsage"), "tasks": c.Int("Tasks"), "run_queue": c.Int("Run_queue"), - "idle_pct": c.Int("Idle_pct"), + "idle_pct": c.Float("Idle_pct"), } ) @@ -76,7 +77,18 @@ func parseResponse(data []byte) map[string]string { if parts[0] == "Name" || parts[0] == "Version" || parts[0] == "Release_date" || parts[0] == "Uptime" || parts[0] == "node" || parts[0] == "description" { continue } - resultMap[parts[0]] = strings.Trim(parts[1], " ") + + if parts[0] == "Idle_pct" { + // Convert this value to a float between 0.0 and 1.0 + f, _ := strconv.ParseFloat(parts[1], 64) + resultMap[parts[0]] = strconv.FormatFloat(f/float64(100), 'f', 2, 64) + } else if parts[0] == "Memmax_MB" { + // Convert this value to bytes 
+ val, _ := strconv.Atoi(strings.Trim(parts[1], " ")) + resultMap[parts[0]] = strconv.Itoa((val * 1024 * 1024)) + } else { + resultMap[parts[0]] = strings.Trim(parts[1], " ") + } } return resultMap } diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go index cc6b69fbb11..53c2a96c299 100644 --- a/metricbeat/module/haproxy/info/info.go +++ b/metricbeat/module/haproxy/info/info.go @@ -65,13 +65,13 @@ func (m *MetricSet) Fetch() (common.MapStr, error) { hapc, err := haproxy.NewHaproxyClient(m.statsAddr) if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error: %s", err)) + return nil, fmt.Errorf("HAProxy Client error: %s", err) } res, err := hapc.GetInfo() if err != nil { - return nil, fmt.Errorf(fmt.Sprintf("HAProxy Client error fetching %s: %s", statsMethod, err)) + return nil, fmt.Errorf("HAProxy Client error fetching %s: %s", statsMethod, err) } m.counter++ From e2828a8f8f555e24b4d2c492a526d921d74dad7a Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Sat, 27 Aug 2016 13:34:18 -0400 Subject: [PATCH 11/19] Minor fixes --- metricbeat/module/haproxy/haproxy.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go index adf352d773c..240ee331104 100644 --- a/metricbeat/module/haproxy/haproxy.go +++ b/metricbeat/module/haproxy/haproxy.go @@ -132,7 +132,6 @@ type Info struct { // Client is an instance of the HAProxy client type Client struct { - //connection net.Conn Address string ProtoScheme string } @@ -163,9 +162,7 @@ func (c *Client) run(cmd string) (*bytes.Buffer, error) { if err != nil { return response, err } - //c.connection = conn - //defer c.connection.Close() defer conn.Close() _, err = conn.Write([]byte(cmd + "\n")) @@ -220,17 +217,17 @@ func (c *Client) GetInfo() (*Info, error) { for _, ln := range strings.Split(string(b), "\n") { - ln := strings.Trim(ln, " ") + ln := strings.TrimSpace(ln) if ln == "" { continue } - parts := strings.Split(strings.Trim(ln, " "), ":") + parts := strings.Split(ln, ":") if len(parts) != 2 { continue } - resultMap[parts[0]] = strings.Trim(parts[1], " ") + resultMap[parts[0]] = strings.TrimSpace(parts[1]) } var result *Info From 3e4384e1ac7a0b4b64af5608172cf02321a352af Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Mon, 29 Aug 2016 06:24:12 -0400 Subject: [PATCH 12/19] Fixed field names/values & removed unused function --- metricbeat/module/haproxy/haproxy.go | 96 +++++++++++++------------- metricbeat/module/haproxy/info/data.go | 73 ++++++++------------ metricbeat/module/haproxy/stat/data.go | 6 +- 3 files changed, 79 insertions(+), 96 deletions(-) diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go index 240ee331104..dbc2433afc0 100644 --- a/metricbeat/module/haproxy/haproxy.go +++ b/metricbeat/module/haproxy/haproxy.go @@ -80,54 +80,54 @@ type Stat struct { } type Info struct { - Name string - Version string - ReleaseDate string - Nbproc string - ProcessNum string - Pid string - Uptime string - UptimeSec string - MemMaxMB string - UlimitN string - Maxsock string - Maxconn string - HardMaxconn string - CurrConns string - CumConns string - CumReq string - MaxSslConns string - CurrSslConns string - CumSslConns string - Maxpipes string - PipesUsed string - PipesFree string - ConnRate string - ConnRateLimit string - MaxConnRate string - SessRate string - SessRateLimit string - MaxSessRate string - SslRate string - SslRateLimit string - MaxSslRate string - 
SslFrontendKeyRate string - SslFrontendMaxKeyRate string - SslFrontendSessionReusePct string - SslBackendKeyRate string - SslBackendMaxKeyRate string - SslCacheLookups string - SslCacheMisses string - CompressBpsIn string - CompressBpsOut string - CompressBpsRateLim string - ZlibMemUsage string - MaxZlibMemUsage string - Tasks string - RunQueue string - IdlePct string - Node string - Description string + Name string `mapstructure:"Name"` + Version string `mapstructure:"Version"` + ReleaseDate string `mapstructure:"Release_date"` + Nbproc string `mapstructure:"Nbproc"` + ProcessNum string `mapstructure:"Process_num"` + Pid string `mapstructure:"Pid"` + Uptime string `mapstructure:"Uptime"` + UptimeSec string `mapstructure:"Uptime_sec"` + MemMaxMB string `mapstructure:"Memmax_MB"` + UlimitN string `mapstructure:"Ulimit-n"` + Maxsock string `mapstructure:"Maxsock"` + Maxconn string `mapstructure:"Maxconn"` + HardMaxconn string `mapstructure:"Hard_maxconn"` + CurrConns string `mapstructure:"CurrConns"` + CumConns string `mapstructure:"CumConns"` + CumReq string `mapstructure:"CumReq"` + MaxSslConns string `mapstructure:"MaxSslConns"` + CurrSslConns string `mapstructure:"CurrSslConns"` + CumSslConns string `mapstructure:"CumSslConns"` + Maxpipes string `mapstructure:"Maxpipes"` + PipesUsed string `mapstructure:"PipesUsed"` + PipesFree string `mapstructure:"PipesFree"` + ConnRate string `mapstructure:"ConnRate"` + ConnRateLimit string `mapstructure:"ConnRateLimit"` + MaxConnRate string `mapstructure:"MaxConnRate"` + SessRate string `mapstructure:"SessRate"` + SessRateLimit string `mapstructure:"SessRateLimit"` + MaxSessRate string `mapstructure:"MaxSessRate"` + SslRate string `mapstructure:"SslRate"` + SslRateLimit string `mapstructure:"SslRateLimit"` + MaxSslRate string `mapstructure:"MaxSslRate"` + SslFrontendKeyRate string `mapstructure:"SslFrontendKeyRate"` + SslFrontendMaxKeyRate string `mapstructure:"SslFrontendMaxKeyRate"` + SslFrontendSessionReusePct string `mapstructure:"SslFrontendSessionReuse_pct"` + SslBackendKeyRate string `mapstructure:"SslBackendKeyRate"` + SslBackendMaxKeyRate string `mapstructure:"SslBackendMaxKeyRate"` + SslCacheLookups string `mapstructure:"SslCacheLookups"` + SslCacheMisses string `mapstructure:"SslCacheMisses"` + CompressBpsIn string `mapstructure:"CompressBpsIn"` + CompressBpsOut string `mapstructure:"CompressBpsOut"` + CompressBpsRateLim string `mapstructure:"CompressBpsRateLim"` + ZlibMemUsage string `mapstructure:"ZlibMemUsage"` + MaxZlibMemUsage string `mapstructure:"MaxZlibMemUsage"` + Tasks string `mapstructure:"Tasks"` + RunQueue string `mapstructure:"Run_queue"` + IdlePct string `mapstructure:"Idle_pct"` + Node string `mapstructure:"Node"` + Description string `mapstructure:"Description"` } // Client is an instance of the HAProxy client diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go index c3dce442bca..488dc6517d7 100644 --- a/metricbeat/module/haproxy/info/data.go +++ b/metricbeat/module/haproxy/info/data.go @@ -2,9 +2,11 @@ package info import ( "github.com/elastic/beats/libbeat/common" + //"github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/module/haproxy" s "github.com/elastic/beats/metricbeat/schema" c "github.com/elastic/beats/metricbeat/schema/mapstrstr" + "reflect" "strconv" "strings" @@ -13,14 +15,14 @@ import ( var ( schema = s.Schema{ "nb_proc": c.Int("Nbproc"), - "process_num": c.Int("Process_num"), + "process_num": c.Int("ProcessNum"), "pid": c.Int("Pid"), - 
"uptime_sec": c.Int("Uptime_sec"), - "mem_max_mb": c.Int("Memmax_MB"), - "ulimit_n": c.Int("Ulimit-n"), + "uptime_sec": c.Int("UptimeSec"), + "mem_max_mb": c.Int("MemMaxMB"), + "ulimit_n": c.Int("UlimitN"), "max_sock": c.Int("Maxsock"), "max_conn": c.Int("Maxconn"), - "hard_max_conn": c.Int("Hard_maxconn"), + "hard_max_conn": c.Int("HardMaxconn"), "curr_conns": c.Int("CurrConns"), "cum_conns": c.Int("CumConns"), "cum_req": c.Int("CumReq"), @@ -41,7 +43,7 @@ var ( "max_ssl_rate": c.Int("MaxSslRate"), "ssl_frontend_key_rate": c.Int("SslFrontendKeyRate"), "ssl_frontend_max_key_rate": c.Int("SslFrontendMaxKeyRate"), - "ssl_frontend_session_reuse_pct": c.Int("SslFrontendSessionReuse_pct"), + "ssl_frontend_session_reuse_pct": c.Int("SslFrontendSessionReusePct"), "ssl_babckend_key_rate": c.Int("SslBackendKeyRate"), "ssl_backend_max_key_rate": c.Int("SslBackendMaxKeyRate"), "ssl_cached_lookups": c.Int("SslCacheLookups"), @@ -52,47 +54,11 @@ var ( "zlib_mem_usage": c.Int("ZlibMemUsage"), "max_zlib_mem_usage": c.Int("MaxZlibMemUsage"), "tasks": c.Int("Tasks"), - "run_queue": c.Int("Run_queue"), - "idle_pct": c.Float("Idle_pct"), + "run_queue": c.Int("RunQueue"), + "idle_pct": c.Float("IdlePct"), } ) -func parseResponse(data []byte) map[string]string { - - resultMap := map[string]string{} - str := string(data) - - for _, ln := range strings.Split(str, "\n") { - - ln := strings.Trim(ln, " ") - if ln == "" { - continue - } - - parts := strings.Split(strings.Trim(ln, " "), ":") - if len(parts) != 2 { - continue - } - - if parts[0] == "Name" || parts[0] == "Version" || parts[0] == "Release_date" || parts[0] == "Uptime" || parts[0] == "node" || parts[0] == "description" { - continue - } - - if parts[0] == "Idle_pct" { - // Convert this value to a float between 0.0 and 1.0 - f, _ := strconv.ParseFloat(parts[1], 64) - resultMap[parts[0]] = strconv.FormatFloat(f/float64(100), 'f', 2, 64) - } else if parts[0] == "Memmax_MB" { - // Convert this value to bytes - val, _ := strconv.Atoi(strings.Trim(parts[1], " ")) - resultMap[parts[0]] = strconv.Itoa((val * 1024 * 1024)) - } else { - resultMap[parts[0]] = strings.Trim(parts[1], " ") - } - } - return resultMap -} - // Map data to MapStr func eventMapping(info *haproxy.Info) common.MapStr { // Full mapping from info @@ -105,7 +71,24 @@ func eventMapping(info *haproxy.Info) common.MapStr { for i := 0; i < st.NumField(); i++ { f := st.Field(i) - source[typeOfT.Field(i).Name] = f.Interface() + + if typeOfT.Field(i).Name == "IdlePct" { + // Convert this value to a float between 0.0 and 1.0 + fval, err := strconv.ParseFloat(f.Interface().(string), 64) + if err != nil { + panic(err) + } + source[typeOfT.Field(i).Name] = strconv.FormatFloat(fval/float64(100), 'f', 2, 64) + } else if typeOfT.Field(i).Name == "Memmax_MB" { + // Convert this value to bytes + val, err := strconv.Atoi(strings.TrimSpace(f.Interface().(string))) + if err != nil { + panic(err) + } + source[typeOfT.Field(i).Name] = strconv.Itoa((val * 1024 * 1024)) + } else { + source[typeOfT.Field(i).Name] = f.Interface() + } } diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go index 6450b2e1072..a83ce2e12a8 100644 --- a/metricbeat/module/haproxy/stat/data.go +++ b/metricbeat/module/haproxy/stat/data.go @@ -32,8 +32,8 @@ var ( "weight": c.Int("Weight"), "act": c.Int("Act"), "bck": c.Int("Bck"), - "chkfail": c.Int("Chkfail"), - "chkdown": c.Int("Chkdown"), + "chkfail": c.Int("ChkFail"), + "chkdown": c.Int("ChkDown"), "lastchg": c.Int("Lastchg"), "downtime": 
c.Int("Downtime"), "qlimit": c.Int("Qlimit"), @@ -66,7 +66,7 @@ var ( "comp_out": c.Int("CompOut"), "comp_byp": c.Int("CompByp"), "comp_rsp": c.Int("CompRsp"), - "lastsess": c.Int("Lastsess"), + "lastsess": c.Int("LastSess"), "last_chk": c.Str("LastChk"), "last_agt": c.Int("LastAgt"), "qtime": c.Int("Qtime"), From 004d9fa885b22d5d7daa7392bdc7d3d99d4a05b1 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Mon, 29 Aug 2016 06:30:02 -0400 Subject: [PATCH 13/19] Updated filebeat configs --- metricbeat/metricbeat.full.yml | 14 ++++++++------ metricbeat/metricbeat.yml | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/metricbeat/metricbeat.full.yml b/metricbeat/metricbeat.full.yml index 2b92f670d85..bf7ec4d3887 100644 --- a/metricbeat/metricbeat.full.yml +++ b/metricbeat/metricbeat.full.yml @@ -62,12 +62,14 @@ metricbeat.modules: #password: test123 #------------------------------- haproxy Module ------------------------------ -- module: haproxy - metricsets: ["stat"] - enabled: true - period: 1s - hosts: ["localhost"] - +#- module: haproxy + #metricsets: + #- "stat" + #- "info" + #enabled: true + #period: 10s + # The address could also be in the form of a unix socket if the metricbeat process is running locally "unix:///var/run/haproxy-stats.sock" + #stats_addr: "tcp://127.0.0.1:14567" #------------------------------- MongoDB Module ------------------------------ #- module: mongodb diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 9b80600a365..1d9662a94d3 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -50,7 +50,7 @@ metricbeat.modules: - "info" enabled: true period: 10s - #stats_addr: "unix:///tmp/haproxy-stats.sock" + # The address could also be in the form of a unix socket if the metricbeat process is running locally "unix:///var/run/haproxy-stats.sock" stats_addr: "tcp://127.0.0.1:14567" From 007d4a4028226a879c7496d58aef7645db092eea Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Wed, 31 Aug 2016 06:41:44 -0400 Subject: [PATCH 14/19] Comitting curent changes --- metricbeat/.gitignore | 3 +-- metricbeat/logs/metricbeat | 10 ---------- metricbeat/metricbeat.yml | 14 +------------- metricbeat/module/haproxy/_meta/docs.asciidoc | 7 ++++++- metricbeat/module/haproxy/info/data.go | 1 - metricbeat/module/haproxy/stat/stat.go | 2 -- 6 files changed, 8 insertions(+), 29 deletions(-) delete mode 100644 metricbeat/logs/metricbeat diff --git a/metricbeat/.gitignore b/metricbeat/.gitignore index fa460c39290..5dcd9d08b70 100644 --- a/metricbeat/.gitignore +++ b/metricbeat/.gitignore @@ -2,5 +2,4 @@ build /metricbeat /metricbeat.test -/docs/html_docs -/logs/* \ No newline at end of file +/docs/html_docs \ No newline at end of file diff --git a/metricbeat/logs/metricbeat b/metricbeat/logs/metricbeat deleted file mode 100644 index ee4186e2116..00000000000 --- a/metricbeat/logs/metricbeat +++ /dev/null @@ -1,10 +0,0 @@ -2016-08-10T19:06:01-04:00 INFO Metrics logging every 30s -2016-08-10T19:06:01-04:00 INFO Loading template enabled. Reading template file: /Users/alain.lefebvre/Documents/code/github_personal/beats/metricbeat/metricbeat.template.json -2016-08-10T19:06:01-04:00 INFO Loading template enabled for Elasticsearch 2.x. Reading template file: /Users/alain.lefebvre/Documents/code/github_personal/beats/metricbeat/metricbeat.template-es2x.json -2016-08-10T19:06:01-04:00 INFO Elasticsearch url: http://localhost:9200 -2016-08-10T19:06:01-04:00 INFO Activated elasticsearch as output plugin. 
-2016-08-10T19:06:01-04:00 INFO Publisher name: Alain-L-MBP -2016-08-10T19:06:01-04:00 INFO Flush Interval set to: 1s -2016-08-10T19:06:01-04:00 INFO Max Bulk Size set to: 50 -2016-08-10T19:06:01-04:00 INFO Register [ModuleFactory:[system], MetricSetFactory:[apache/status, mongodb/status, mysql/status, nginx/stubstatus, redis/info, redis/keyspace, system/core, system/cpu, system/filesystem, system/fsstat, system/load, system/memory, system/network, system/process, zookeeper/mntr]] -2016-08-10T19:06:01-04:00 CRIT Exiting: 1 error: metricset 'haproxy/stat' is not registered, module not found diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 1d9662a94d3..faf22ed75e5 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -36,25 +36,13 @@ metricbeat.modules: # Per process stats - process - enabled: false + enabled: true period: 10s processes: ['.*'] # if true, exports the CPU usage in ticks, together with the percentage values cpu_ticks: false -#------------------------------- haproxy Module ------------------------------ -- module: haproxy - metricsets: - - "stat" - - "info" - enabled: true - period: 10s - # The address could also be in the form of a unix socket if the metricbeat process is running locally "unix:///var/run/haproxy-stats.sock" - stats_addr: "tcp://127.0.0.1:14567" - - - #================================ General ===================================== diff --git a/metricbeat/module/haproxy/_meta/docs.asciidoc b/metricbeat/module/haproxy/_meta/docs.asciidoc index 1605700b6de..dbf65dfe4e4 100644 --- a/metricbeat/module/haproxy/_meta/docs.asciidoc +++ b/metricbeat/module/haproxy/_meta/docs.asciidoc @@ -1,4 +1,9 @@ == haproxy Module -This is the haproxy Module. +This is the haproxy Module. To enable stats collection from HAProxy, you must enable the stats socket via TCP. +For example, placing the following statement under the `global` or `default` section of the haproxy config: +`stats socket 0.0.0.0:14567` + +will enable stats reporting via any local IP on port 14567. Please note that you should probably use an internal private IP +or secure this with a firewall rule so that only designated hosts can access this data. 
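For readers trying out the docs text above, here is a minimal sketch of a complete `haproxy.cfg` built around that `stats socket` line. Only the `stats socket` directive itself comes from this patch series; the section layout, listener addresses, server names, and timeouts are illustrative assumptions, and the directive is shown under `global`, where it is typically declared:

[source,haproxy]
----
global
    # Expose the stats interface over TCP so the metricbeat haproxy
    # module can connect to it (use an internal IP or a firewall rule
    # in production, as the docs above advise).
    stats socket 0.0.0.0:14567

defaults
    mode http
    timeout connect 5s
    timeout client 30s
    timeout server 30s

frontend http-in
    bind *:80
    default_backend app

backend app
    # Placeholder backend server for the sketch.
    server app1 127.0.0.1:8080 check
----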
diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go index 488dc6517d7..e4addeba7b7 100644 --- a/metricbeat/module/haproxy/info/data.go +++ b/metricbeat/module/haproxy/info/data.go @@ -2,7 +2,6 @@ package info import ( "github.com/elastic/beats/libbeat/common" - //"github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/module/haproxy" s "github.com/elastic/beats/metricbeat/schema" c "github.com/elastic/beats/metricbeat/schema/mapstrstr" diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go index cdb21f0ccd7..226553cfbbe 100644 --- a/metricbeat/module/haproxy/stat/stat.go +++ b/metricbeat/module/haproxy/stat/stat.go @@ -1,13 +1,11 @@ package stat import ( - //"errors" "fmt" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/haproxy" - //"net" ) const ( From 498ada4885c7090824a8fe1cb418027be25a7f61 Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Fri, 2 Sep 2016 16:27:28 -0400 Subject: [PATCH 15/19] Applied changes as per comments --- metricbeat/module/haproxy/_meta/docs.asciidoc | 2 +- metricbeat/module/haproxy/haproxy.go | 19 ++++++++++--------- .../module/haproxy/info/_meta/fields.yml | 4 ++-- metricbeat/module/haproxy/info/data.go | 14 ++++++-------- metricbeat/module/haproxy/info/info.go | 6 +++++- metricbeat/module/haproxy/stat/data.go | 3 +-- 6 files changed, 25 insertions(+), 23 deletions(-) diff --git a/metricbeat/module/haproxy/_meta/docs.asciidoc b/metricbeat/module/haproxy/_meta/docs.asciidoc index dbf65dfe4e4..42bd58464a0 100644 --- a/metricbeat/module/haproxy/_meta/docs.asciidoc +++ b/metricbeat/module/haproxy/_meta/docs.asciidoc @@ -3,7 +3,7 @@ This is the haproxy Module. To enable stats collection from HAProxy, you must enable the stats socket via TCP. For example, placing the following statement under the `global` or `default` section of the haproxy config: -`stats socket 0.0.0.0:14567` +`stats socket 127.0.0.1:14567` will enable stats reporting via any local IP on port 14567. Please note that you should probably use an internal private IP or secure this with a firewall rule so that only designated hosts can access this data. 
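Since the client code changed below is easiest to follow with a concrete caller in mind, here is a minimal, self-contained sketch of how the `haproxy.Client` from this module could be exercised against the stats socket configured above. The import path, constructor, methods, and struct fields come from this patch series; the address, program layout, and printed fields are illustrative assumptions:

[source,go]
----
package main

import (
	"fmt"
	"log"

	"github.com/elastic/beats/metricbeat/module/haproxy"
)

func main() {
	// The scheme must be "tcp://" or "unix://", mirroring the
	// stats_addr values used in the metricbeat configs.
	client, err := haproxy.NewHaproxyClient("tcp://127.0.0.1:14567")
	if err != nil {
		log.Fatalf("client error: %s", err)
	}

	// GetInfo sends "show info" and decodes the response into an Info struct.
	info, err := client.GetInfo()
	if err != nil {
		log.Fatalf("show info failed: %s", err)
	}
	fmt.Println("haproxy pid:", info.Pid)

	// GetStat sends "show stat" and unmarshals the CSV into Stat structs.
	stats, err := client.GetStat()
	if err != nil {
		log.Fatalf("show stat failed: %s", err)
	}
	for _, s := range stats {
		fmt.Printf("%s/%s status=%s\n", s.PxName, s.SvName, s.Status)
	}
}
----

Each call opens a fresh connection, so the sketch needs no explicit cleanup: `run` closes the socket via `defer conn.Close()` after every command.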
diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go index dbc2433afc0..32ab45465fd 100644 --- a/metricbeat/module/haproxy/haproxy.go +++ b/metricbeat/module/haproxy/haproxy.go @@ -5,12 +5,13 @@ import ( "encoding/csv" "errors" "fmt" - "github.com/gocarina/gocsv" - "github.com/mitchellh/mapstructure" "io" "io/ioutil" "net" "strings" + + "github.com/gocarina/gocsv" + "github.com/mitchellh/mapstructure" ) // Stat is an instance of the HAProxy stat information @@ -88,7 +89,7 @@ type Info struct { Pid string `mapstructure:"Pid"` Uptime string `mapstructure:"Uptime"` UptimeSec string `mapstructure:"Uptime_sec"` - MemMaxMB string `mapstructure:"Memmax_MB"` + MemMax string `mapstructure:"Memmax_MB"` UlimitN string `mapstructure:"Ulimit-n"` Maxsock string `mapstructure:"Maxsock"` Maxconn string `mapstructure:"Maxconn"` @@ -140,11 +141,11 @@ type Client struct { func NewHaproxyClient(address string) (*Client, error) { parts := strings.Split(address, "://") if len(parts) != 2 { - return nil, errors.New("Must have protocol scheme and address!") + return nil, errors.New("must have protocol scheme and address") } if parts[0] != "tcp" && parts[0] != "unix" { - return nil, errors.New("Invalid Protocol Scheme!") + return nil, errors.New("invalid protocol scheme") } return &Client{ @@ -176,7 +177,7 @@ func (c *Client) run(cmd string) (*bytes.Buffer, error) { } if strings.HasPrefix(response.String(), "Unknown command") { - return response, fmt.Errorf("Unknown command: %s", cmd) + return response, fmt.Errorf("unknown command: %s", cmd) } return response, nil @@ -196,7 +197,7 @@ func (c *Client) GetStat() ([]*Stat, error) { err = gocsv.UnmarshalCSV(csvReader, &statRes) if err != nil { - return nil, fmt.Errorf("Error parsing CSV: %s", err) + return nil, fmt.Errorf("error parsing CSV: %s", err) } return statRes, nil @@ -231,8 +232,8 @@ func (c *Client) GetInfo() (*Info, error) { } var result *Info - err := mapstructure.Decode(resultMap, &result) - if err != nil { + + if err := mapstructure.Decode(resultMap, &result); err != nil { return nil, err } return result, nil diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml index 5301ffbac64..5025b2c4886 100644 --- a/metricbeat/module/haproxy/info/_meta/fields.yml +++ b/metricbeat/module/haproxy/info/_meta/fields.yml @@ -23,11 +23,11 @@ description: > Current uptime in seconds - - name: mem_max_mb + - name: mem_max_bytes type: integer format: bytes description: > - Max number of memory usage in MB + Max number of memory usage in bytes (The 'Memmax_MB' value converted to bytes) - name: ulimit_n type: integer diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go index e4addeba7b7..e6bb63c1cf1 100644 --- a/metricbeat/module/haproxy/info/data.go +++ b/metricbeat/module/haproxy/info/data.go @@ -17,7 +17,7 @@ var ( "process_num": c.Int("ProcessNum"), "pid": c.Int("Pid"), "uptime_sec": c.Int("UptimeSec"), - "mem_max_mb": c.Int("MemMaxMB"), + "mem_max_bytes": c.Int("MemMax"), "ulimit_n": c.Int("UlimitN"), "max_sock": c.Int("Maxsock"), "max_conn": c.Int("Maxconn"), @@ -59,14 +59,12 @@ var ( ) // Map data to MapStr -func eventMapping(info *haproxy.Info) common.MapStr { +func eventMapping(info *haproxy.Info) (common.MapStr, error) { // Full mapping from info - source := map[string]interface{}{} - st := reflect.ValueOf(info).Elem() typeOfT := st.Type() - source = map[string]interface{}{} + source := map[string]interface{}{} for i := 0; i < 
st.NumField(); i++ {
 		f := st.Field(i)
@@ -75,14 +73,14 @@ func eventMapping(info *haproxy.Info) common.MapStr {
 			// Convert this value to a float between 0.0 and 1.0
 			fval, err := strconv.ParseFloat(f.Interface().(string), 64)
 			if err != nil {
-				panic(err)
+				return nil, err
 			}
 			source[typeOfT.Field(i).Name] = strconv.FormatFloat(fval/float64(100), 'f', 2, 64)
 		} else if typeOfT.Field(i).Name == "Memmax_MB" {
 			// Convert this value to bytes
 			val, err := strconv.Atoi(strings.TrimSpace(f.Interface().(string)))
 			if err != nil {
-				panic(err)
+				return nil, err
 			}
 			source[typeOfT.Field(i).Name] = strconv.Itoa((val * 1024 * 1024))
 		} else {
@@ -91,5 +89,5 @@
 	}
 
-	return schema.Apply(source)
+	return schema.Apply(source), nil
 }
diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go
index 53c2a96c299..9fb7601f748 100644
--- a/metricbeat/module/haproxy/info/info.go
+++ b/metricbeat/module/haproxy/info/info.go
@@ -75,6 +75,10 @@ func (m *MetricSet) Fetch() (common.MapStr, error) {
 	}
 
 	m.counter++
-	return eventMapping(res), nil
+
+	mappedEvent, err := eventMapping(res)
+	if err != nil {
+		return nil, err
+	}
+	return mappedEvent, nil
 }
diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go
index a83ce2e12a8..944b3a87b83 100644
--- a/metricbeat/module/haproxy/stat/data.go
+++ b/metricbeat/module/haproxy/stat/data.go
@@ -119,12 +119,11 @@ func parseResponse(data []byte) []map[string]string {
 func eventMapping(info []*haproxy.Stat) []common.MapStr {
 
 	var events []common.MapStr
-	source := map[string]interface{}{}
 
 	for _, evt := range info {
 		st := reflect.ValueOf(evt).Elem()
 		typeOfT := st.Type()
-		source = map[string]interface{}{}
+		source := map[string]interface{}{}
 
 		for i := 0; i < st.NumField(); i++ {
 			f := st.Field(i)

From e9897189597633d863450c518151334d8d73ec9a Mon Sep 17 00:00:00 2001
From: Al Lefebvre
Date: Sat, 3 Sep 2016 16:23:33 -0400
Subject: [PATCH 16/19] Updated structure to follow event conventions

---
 .../module/haproxy/info/_meta/fields.yml      | 340 ++++++++++--------
 metricbeat/module/haproxy/info/data.go        | 125 ++++---
 .../module/haproxy/stat/_meta/fields.yml      | 259 +++++++------
 metricbeat/module/haproxy/stat/data.go        | 147 ++++----
 4 files changed, 514 insertions(+), 357 deletions(-)

diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml
index 5025b2c4886..4eeea5bfb02 100644
--- a/metricbeat/module/haproxy/info/_meta/fields.yml
+++ b/metricbeat/module/haproxy/info/_meta/fields.yml
@@ -34,154 +34,210 @@
       description: >
         Max number of open files for process
 
-    - name: max_sock
-      type: integer
-      description: >
-
-    - name: max_conn
-      type: integer
-      description: >
-
-    - name: hard_max_conn
-      type: integer
-      description: >
-
-    - name: curr_conns
-      type: integer
-      description: >
-
-    - name: cum_conns
-      type: integer
-      description: >
-
-    - name: cum_req
-      type: integer
-      description: >
-
-    - name: max_ssl_conns
-      type: integer
-      description: >
-
-    - name: curr_ssl_conns
-      type: integer
-      description: >
-
-    - name: cum_ssl_conns
-      type: integer
-      description: >
-
-    - name: max_pipes
-      type: integer
-      description: >
-
-    - name: pipes_used
-      type: integer
-      description: >
+    - name: compress
+      type: group
+      description: >
+
+      fields:
+        - name: bps
+          type: group
+          description: >
+
+          fields:
+            - name: in
+              type: integer
+              description: >
+
+            - name: out
+              type: integer
+              description: >
+
+            - name: rate_limit
+              type: integer
+              description: >
+
+    - 
name: conn + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + + - name: limit + type: integer + description: > + + - name: curr + type: group + description: > + + fields: + - name: conns + type: integer + description: > + + - name: ssl_conns + type: integer + description: > + + - name: cum + type: group + description: > + + fields: + - name: conns + type: integer + description: > + + - name: req + type: integer + description: > + + - name: ssl_conns + type: integer + description: > + + + - name: max + type: group + description: > + + fields: + - name: hard_conn + type: integer + description: > + + - name: sock + type: integer + description: > + + - name: conn + type: integer + description: > + + - name: ssl_conns + type: integer + description: > + + - name: pipes + type: integer + description: > + + - name: conn_rate + type: integer + description: > - - name: pipes_free - type: integer - description: > - - - name: conn_rate - type: integer - description: > - - - name: conn_rate_limit - type: integer - description: > - - - name: max_conn_rate - type: integer - description: > - - - name: sess_rate - type: integer - description: > - - - name: sess_rate_limit - type: integer - description: > - - - name: max_sess_rate - type: integer - description: > - - - name: ssl_rate - type: integer - description: > - - - name: ssl_rate_limit - type: integer - description: > - - - name: max_ssl_rate - type: integer - description: > - - - name: ssl_frontend_key_rate - type: integer - description: > - - - name: ssl_frontend_max_key_rate - type: integer - description: > - - - name: ssl_frontend_session_reuse_pct - type: integer - description: > - - - name: ssl_babckend_key_rate - type: integer - description: > - - - name: ssl_frontend_key_rate - type: integer - description: > - - - name: ssl_frontend_max_key_rate - type: integer - description: > - - - name: ssl_frontend_session_reuse_pct - type: integer - description: > - - - name: ssl_babckend_key_rate - type: integer - description: > - - - name: ssl_backend_max_key_rate - type: integer - description: > - - - name: ssl_cached_lookups - type: integer - description: > - - - name: ssl_cache_misses - type: integer - description: > - - - name: compress_bps_in - type: integer - description: > - - - name: compress_bps_out - type: integer - description: > - - - name: compress_bps_rate_limit - type: integer - description: > + - name: sess_rate + type: integer + description: > + + - name: ssl_rate + type: integer + description: > + + - name: zlib_mem_usage + type: integer + description: > + + - name: pipes + type: group + description: > + + fields: + - name: used + type: integer + description: > + + - name: free + type: integer + description: > + + + - name: sess + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + + - name: limit + type: integer + description: > + + - name: ssl + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + + - name: limit + type: integer + description: > + + - name: frontend + type: group + description: > + + fields: + - name: key_rate + type: integer + description: > + + - name: max_key_rate + type: integer + description: > + + - name: session_reuse_pct + type: integer + description: > + + - name: backend + type: group + description: > + + fields: + 
- name: key_rate + type: integer + description: > + + - name: max_key_rate + type: integer + description: > + + - name: ssl_cached_lookups + type: integer + description: > + + - name: ssl_cache_misses + type: integer + description: > - name: zlib_mem_usage type: integer description: > - - name: max_zlib_mem_usage - type: integer - description: > - - name: tasks type: integer description: > diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go index e6bb63c1cf1..97f2d709d20 100644 --- a/metricbeat/module/haproxy/info/data.go +++ b/metricbeat/module/haproxy/info/data.go @@ -13,48 +13,89 @@ import ( var ( schema = s.Schema{ - "nb_proc": c.Int("Nbproc"), - "process_num": c.Int("ProcessNum"), - "pid": c.Int("Pid"), - "uptime_sec": c.Int("UptimeSec"), - "mem_max_bytes": c.Int("MemMax"), - "ulimit_n": c.Int("UlimitN"), - "max_sock": c.Int("Maxsock"), - "max_conn": c.Int("Maxconn"), - "hard_max_conn": c.Int("HardMaxconn"), - "curr_conns": c.Int("CurrConns"), - "cum_conns": c.Int("CumConns"), - "cum_req": c.Int("CumReq"), - "max_ssl_conns": c.Int("MaxSslConns"), - "curr_ssl_conns": c.Int("CurrSslConns"), - "cum_ssl_conns": c.Int("CumSslConns"), - "max_pipes": c.Int("Maxpipes"), - "pipes_used": c.Int("PipesUsed"), - "pipes_free": c.Int("PipesFree"), - "conn_rate": c.Int("ConnRate"), - "conn_rate_limit": c.Int("ConnRateLimit"), - "max_conn_rate": c.Int("MaxConnRate"), - "sess_rate": c.Int("SessRate"), - "sess_rate_limit": c.Int("SessRateLimit"), - "max_sess_rate": c.Int("MaxSessRate"), - "ssl_rate": c.Int("SslRate"), - "ssl_rate_limit": c.Int("SslRateLimit"), - "max_ssl_rate": c.Int("MaxSslRate"), - "ssl_frontend_key_rate": c.Int("SslFrontendKeyRate"), - "ssl_frontend_max_key_rate": c.Int("SslFrontendMaxKeyRate"), - "ssl_frontend_session_reuse_pct": c.Int("SslFrontendSessionReusePct"), - "ssl_babckend_key_rate": c.Int("SslBackendKeyRate"), - "ssl_backend_max_key_rate": c.Int("SslBackendMaxKeyRate"), - "ssl_cached_lookups": c.Int("SslCacheLookups"), - "ssl_cache_misses": c.Int("SslCacheMisses"), - "compress_bps_in": c.Int("CompressBpsIn"), - "compress_bps_out": c.Int("CompressBpsOut"), - "compress_bps_rate_limit": c.Int("CompressBpsRateLim"), - "zlib_mem_usage": c.Int("ZlibMemUsage"), - "max_zlib_mem_usage": c.Int("MaxZlibMemUsage"), - "tasks": c.Int("Tasks"), - "run_queue": c.Int("RunQueue"), - "idle_pct": c.Float("IdlePct"), + "nb_proc": c.Int("Nbproc"), + "process_num": c.Int("ProcessNum"), + "pid": c.Int("Pid"), + "uptime_sec": c.Int("UptimeSec"), + "mem_max_bytes": c.Int("MemMax"), + "ulimit_n": c.Int("UlimitN"), + + "compress": s.Object{ + "bps": s.Object{ + "in": c.Int("CompressBpsIn"), + "out": c.Int("CompressBpsOut"), + "rate_limit": c.Int("CompressBpsRateLim"), + }, + }, + + "conn": s.Object{ + "rate": s.Object{ + "value": c.Int("ConnRate"), + "limit": c.Int("ConnRateLimit"), + }, + }, + + "curr": s.Object{ + "conns": c.Int("CurrConns"), + "ssl_conns": c.Int("CurrSslConns"), + }, + + "cum": s.Object{ + "conns": c.Int("CumConns"), + "req": c.Int("CumReq"), + "ssl_conns": c.Int("CumSslConns"), + }, + + "max": s.Object{ + "hard_conn": c.Int("HardMaxconn"), + "ssl": s.Object{ + "conns": c.Int("MaxSslConns"), + "rate": c.Int("MaxSslRate"), + }, + "sock": c.Int("Maxsock"), + "conn": s.Object{ + "value": c.Int("Maxconn"), + "rate": c.Int("MaxConnRate"), + }, + "sess_rate": c.Int("MaxSessRate"), + "pipes": c.Int("Maxpipes"), + "zlib_mem_usage": c.Int("MaxZlibMemUsage"), + }, + + "pipes": s.Object{ + "used": c.Int("PipesUsed"), + "free": c.Int("PipesFree"), + }, 
+ + "sess": s.Object{ + "rate": s.Object{ + "value": c.Int("SessRate"), + "limit": c.Int("SessRateLimit"), + }, + }, + + "ssl": s.Object{ + "rate": s.Object{ + "value": c.Int("SslRate"), + "limit": c.Int("SslRateLimit"), + }, + "frontend": s.Object{ + "key_rate": c.Int("SslFrontendKeyRate"), + "max_key_rate": c.Int("SslFrontendMaxKeyRate"), + "session_reuse_pct": c.Int("SslFrontendSessionReusePct"), + }, + "backend": s.Object{ + "key_rate": c.Int("SslBackendKeyRate"), + "max_key_rate": c.Int("SslBackendMaxKeyRate"), + }, + "cached_lookups": c.Int("SslCacheLookups"), + "cache_misses": c.Int("SslCacheMisses"), + }, + + "zlib_mem_usage": c.Int("ZlibMemUsage"), + "tasks": c.Int("Tasks"), + "run_queue": c.Int("RunQueue"), + "idle_pct": c.Float("IdlePct"), } ) diff --git a/metricbeat/module/haproxy/stat/_meta/fields.yml b/metricbeat/module/haproxy/stat/_meta/fields.yml index ca43e8f4283..60d331f8222 100644 --- a/metricbeat/module/haproxy/stat/_meta/fields.yml +++ b/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -195,100 +195,126 @@ (0=frontend, 1=backend, 2=server, 3=socket/listener) - name: rate - type: integer - description: > - number of sessions per second over last elapsed second - - - name: rate_lim - type: integer - description: > - configured limit on new sessions per second - - - name: rate_max - type: integer - description: > - max number of new sessions per second - - - name: check_status - type: string - description: > - status of last health check, one of: - UNK -> unknown - INI -> initializing - SOCKERR -> socket error - L4OK -> check passed on layer 4, no upper layers testing enabled - L4TOUT -> layer 1-4 timeout - L4CON -> layer 1-4 connection problem, for example - "Connection refused" (tcp rst) or "No route to host" (icmp) - L6OK -> check passed on layer 6 - L6TOUT -> layer 6 (SSL) timeout - L6RSP -> layer 6 invalid response - protocol error - L7OK -> check passed on layer 7 - L7OKC -> check conditionally passed on layer 7, for example 404 with - disable-on-404 - L7TOUT -> layer 7 (HTTP/SMTP) timeout - L7RSP -> layer 7 invalid response - protocol error - L7STS -> layer 7 response error, for example HTTP 5xx - - - name: check_code - type: integer - description: > - layer5-7 code, if available - - - name: check_duration - type: integer - description: > - time in ms took to finish last health check - - - name: hrsp_1xx - type: integer - description: > - http responses with 1xx code - - - name: hrsp_2xx - type: integer - description: > - http responses with 2xx code - - - name: hrsp_3xx - type: integer - description: > - http responses with 3xx code - - - name: hrsp_4xx - type: integer - description: > - http responses with 4xx code - - - name: hrsp_5xx - type: integer - description: > - http responses with 5xx code - - - name: hrsp_other - type: integer - description: > - http responses with other codes (protocol error) + type: group + description: > + + fields: + - name: value + type: integer + description: > + number of sessions per second over last elapsed second + + - name: lim + type: integer + description: > + configured limit on new sessions per second + + - name: max + type: integer + description: > + max number of new sessions per second + + + - name: check + type: group + description: > + + fields: + - name: status + type: string + description: > + status of last health check, one of: + UNK -> unknown + INI -> initializing + SOCKERR -> socket error + L4OK -> check passed on layer 4, no upper layers testing enabled + L4TOUT -> layer 1-4 timeout + L4CON -> layer 1-4 
connection problem, for example
+            "Connection refused" (tcp rst) or "No route to host" (icmp)
+            L6OK -> check passed on layer 6
+            L6TOUT -> layer 6 (SSL) timeout
+            L6RSP -> layer 6 invalid response - protocol error
+            L7OK -> check passed on layer 7
+            L7OKC -> check conditionally passed on layer 7, for example 404 with
+            disable-on-404
+            L7TOUT -> layer 7 (HTTP/SMTP) timeout
+            L7RSP -> layer 7 invalid response - protocol error
+            L7STS -> layer 7 response error, for example HTTP 5xx
+
+        - name: code
+          type: integer
+          description: >
+            layer5-7 code, if available
+
+        - name: duration
+          type: integer
+          description: >
+            time in ms it took to finish the last health check
+
+    - name: hrsp
+      type: group
+      description: >
+
+      fields:
+        - name: 1xx
+          type: integer
+          description: >
+            http responses with 1xx code
+
+        - name: 2xx
+          type: integer
+          description: >
+            http responses with 2xx code
+
+        - name: 3xx
+          type: integer
+          description: >
+            http responses with 3xx code
+
+        - name: 4xx
+          type: integer
+          description: >
+            http responses with 4xx code
+
+        - name: 5xx
+          type: integer
+          description: >
+            http responses with 5xx code
+
+        - name: other
+          type: integer
+          description: >
+            http responses with other codes (protocol error)
 
     - name: hanafail
       type: integer
       description: >
         failed health checks details
 
-    - name: req_rate
-      type: integer
+    - name: req
+      type: group
       description: >
-        HTTP requests per second over last elapsed second
 
-    - name: req_rate_max
-      type: integer
-      description: >
-        max number of HTTP requests per second observed
+      fields:
+        - name: rate
+          type: group
+          description: >
-
-    - name: req_tot
-      type: integer
-      description: >
-        total number of HTTP requests received
+
+          fields:
+            - name: value
+              type: integer
+              description: >
+                HTTP requests per second over last elapsed second
+
+            - name: max
+              type: integer
+              description: >
+                max number of HTTP requests per second observed
+
+        - name: tot
+          type: integer
+          description: >
+            total number of HTTP requests received
 
     - name: cli_abrt
       type: integer
@@ -300,40 +326,51 @@
     description: >
      number of data transfers aborted by the server (inc. 
in eresp)
 
-    - name: comp_in
-      type: integer
+    - name: comp
+      type: group
       description: >
-        number of HTTP response bytes fed to the compressor
 
-    - name: comp_out
-      type: integer
-      description: >
-        number of HTTP response bytes emitted by the compressor
+      fields:
+        - name: in
+          type: integer
+          description: >
+            number of HTTP response bytes fed to the compressor
 
-    - name: comp_byp
-      type: integer
-      description: >
-        number of bytes that bypassed the HTTP compressor (CPU/BW limit)
+        - name: out
+          type: integer
+          description: >
+            number of HTTP response bytes emitted by the compressor
 
-    - name: comp_rsp
-      type: integer
-      description: >
-        number of HTTP responses that were compressed
+        - name: byp
+          type: integer
+          description: >
+            number of bytes that bypassed the HTTP compressor (CPU/BW limit)
 
-    - name: lastsess
-      type: integer
-      description: >
-        number of seconds since last session assigned to server/backend
+        - name: rsp
+          type: integer
+          description: >
+            number of HTTP responses that were compressed
 
-    - name: last_chk
-      type: string
+    - name: last
+      type: group
       description: >
-        last health check contents or textual error
-
-    - name: last_agt
-      type: integer
-      description: >
-        llast agent check contents or textual error
+
+      fields:
+        - name: sess
+          type: integer
+          description: >
+            number of seconds since last session assigned to server/backend
+
+        - name: chk
+          type: string
+          description: >
+            last health check contents or textual error
+
+        - name: agt
+          type: integer
+          description: >
+            last agent check contents or textual error
+
     - name: qtime
       type: integer
       description: >
diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go
index 944b3a87b83..6cc0c0282d7 100644
--- a/metricbeat/module/haproxy/stat/data.go
+++ b/metricbeat/module/haproxy/stat/data.go
@@ -11,68 +11,91 @@ var (
 	schema = s.Schema{
-		"pxname":         c.Str("PxName"),
-		"svname":         c.Str("SvName"),
-		"qcur":           c.Int("Qcur"),
-		"qmax":           c.Int("Qmax"),
-		"scur":           c.Int("Scur"),
-		"smax":           c.Int("Smax"),
-		"slim":           c.Int("Slim"),
-		"stot":           c.Int("Stot"),
-		"bin":            c.Int("Bin"),
-		"bout":           c.Int("Bout"),
-		"dreq":           c.Int("Dreq"),
-		"dresp":          c.Int("Dresp"),
-		"ereq":           c.Int("Ereq"),
-		"econ":           c.Int("Econ"),
-		"eresp":          c.Int("Eresp"),
-		"wretr":          c.Int("Wretr"),
-		"wredis":         c.Int("Wredis"),
-		"status":         c.Str("Status"),
-		"weight":         c.Int("Weight"),
-		"act":            c.Int("Act"),
-		"bck":            c.Int("Bck"),
-		"chkfail":        c.Int("ChkFail"),
-		"chkdown":        c.Int("ChkDown"),
-		"lastchg":        c.Int("Lastchg"),
-		"downtime":       c.Int("Downtime"),
-		"qlimit":         c.Int("Qlimit"),
-		"pid":            c.Int("Pid"),
-		"iid":            c.Int("Iid"),
-		"sid":            c.Int("Sid"),
-		"throttle":       c.Int("Throttle"),
-		"lbtot":          c.Int("Lbtot"),
-		"tracked":        c.Int("Tracked"),
-		"type":           c.Int("Type"),
-		"rate":           c.Int("Rate"),
-		"rate_lim":       c.Int("RateLim"),
-		"rate_max":       c.Int("RateMax"),
-		"check_status":   c.Str("CheckStatus"),
-		"check_code":     c.Int("CheckCode"),
-		"check_duration": c.Int("CheckDuration"),
-		"hrsp_1xx":       c.Int("Hrsp1xx"),
-		"hrsp_2xx":       c.Int("Hrsp2xx"),
-		"hrsp_3xx":       c.Int("Hrsp3xx"),
-		"hrsp_4xx":       c.Int("Hrsp4xx"),
-		"hrsp_5xx":       c.Int("Hrsp5xx"),
-		"hrsp_other":     c.Int("HrspOther"),
-		"hanafail":       c.Int("Hanafail"),
-		"req_rate":       c.Int("ReqRate"),
-		"req_rate_max":   c.Int("ReqRateMax"),
-		"req_tot":        c.Int("ReqTot"),
-		"cli_abrt":       c.Int("CliAbrt"),
-		"srv_abrt":       c.Int("SrvAbrt"),
-		"comp_in":        c.Int("CompIn"),
-		"comp_out":       c.Int("CompOut"),
-		"comp_byp":       c.Int("CompByp"),
-		"comp_rsp":       c.Int("CompRsp"),
-		"lastsess":       c.Int("LastSess"),
-		"last_chk":       c.Str("LastChk"),
-		"last_agt": 
c.Int("LastAgt"), - "qtime": c.Int("Qtime"), - "ctime": c.Int("Ctime"), - "rtime": c.Int("Rtime"), - "ttime": c.Int("Ttime"), + "pxname": c.Str("PxName"), + "svname": c.Str("SvName"), + "qcur": c.Int("Qcur"), + "qmax": c.Int("Qmax"), + "scur": c.Int("Scur"), + "smax": c.Int("Smax"), + "slim": c.Int("Slim"), + "stot": c.Int("Stot"), + "bin": c.Int("Bin"), + "bout": c.Int("Bout"), + "dreq": c.Int("Dreq"), + "dresp": c.Int("Dresp"), + "ereq": c.Int("Ereq"), + "econ": c.Int("Econ"), + "eresp": c.Int("Eresp"), + "wretr": c.Int("Wretr"), + "wredis": c.Int("Wredis"), + "status": c.Str("Status"), + "weight": c.Int("Weight"), + "act": c.Int("Act"), + "bck": c.Int("Bck"), + "chkfail": c.Int("ChkFail"), + "chkdown": c.Int("ChkDown"), + "lastchg": c.Int("Lastchg"), + "downtime": c.Int("Downtime"), + "qlimit": c.Int("Qlimit"), + "pid": c.Int("Pid"), + "iid": c.Int("Iid"), + "sid": c.Int("Sid"), + "throttle": c.Int("Throttle"), + "lbtot": c.Int("Lbtot"), + "tracked": c.Int("Tracked"), + "type": c.Int("Type"), + + "rate": s.Object{ + "value": c.Int("Rate"), + "lim": c.Int("RateLim"), + "max": c.Int("RateMax"), + }, + + "check": s.Object{ + "status": c.Str("CheckStatus"), + "code": c.Int("CheckCode"), + "duration": c.Int("CheckDuration"), + }, + + "hrsp": s.Object{ + "1xx": c.Int("Hrsp1xx"), + "2xx": c.Int("Hrsp2xx"), + "3xx": c.Int("Hrsp3xx"), + "4xx": c.Int("Hrsp4xx"), + "5xx": c.Int("Hrsp5xx"), + "other": c.Int("HrspOther"), + }, + + "hanafail": c.Int("Hanafail"), + + "req": s.Object{ + "rate": s.Object{ + "value": c.Int("ReqRate"), + "max": c.Int("ReqRateMax"), + }, + "tot": c.Int("ReqTot"), + }, + + "cli_abrt": c.Int("CliAbrt"), + "srv_abrt": c.Int("SrvAbrt"), + + "comp": s.Object{ + "in": c.Int("CompIn"), + "out": c.Int("CompOut"), + "byp": c.Int("CompByp"), + "rsp": c.Int("CompRsp"), + }, + + "last": s.Object{ + "sess": c.Int("LastSess"), + "chk": c.Str("LastChk"), + "agt": c.Int("LastAgt"), + }, + + "qtime": c.Int("Qtime"), + "ctime": c.Int("Ctime"), + "rtime": c.Int("Rtime"), + "ttime": c.Int("Ttime"), } ) From 607c564e798a628f77fd7f8416f6f477655064ee Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Sun, 4 Sep 2016 12:59:56 -0400 Subject: [PATCH 17/19] Fixed index templates and data type --- metricbeat/metricbeat.template-es2x.json | 434 +++++++++--------- metricbeat/metricbeat.template.json | 434 +++++++++--------- metricbeat/metricbeat.yml | 13 +- .../module/haproxy/stat/_meta/fields.yml | 4 +- metricbeat/module/haproxy/stat/data.go | 2 +- 5 files changed, 445 insertions(+), 442 deletions(-) diff --git a/metricbeat/metricbeat.template-es2x.json b/metricbeat/metricbeat.template-es2x.json index c71344e5c41..73e3335f2cb 100644 --- a/metricbeat/metricbeat.template-es2x.json +++ b/metricbeat/metricbeat.template-es2x.json @@ -508,234 +508,230 @@ } }, "stat": { - "pxname": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "svname": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "qcur": { - "type": "long" - }, - "qmax": { - "type": "long" - }, - "scur": { - "type": "long" - }, - "smax": { - "type": "long" - }, - "slim": { - "type": "long" - }, - "stot": { - "type": "long" - }, - "bin": { - "type": "long" - }, - "bout": { - "type": "long" - }, - "breq": { - "type": "long" - }, - "dresp": { - "type": "long" - }, - "ereq": { - "type": "long" - }, - "econ": { - "type": "long" - }, - "eresp": { - "type": "long" - }, - "wretr": { - "type": "long" - }, - "wredis": { - "type": "long" - }, - "status": { - "ignore_above": 1024, - "index": 
"not_analyzed", - "type": "string" - }, - "weight": { - "type": "long" - }, - "act": { - "type": "long" - }, - "bck": { - "type": "long" - }, - "chkfail": { - "type": "long" - }, - "chkdown": { - "type": "long" - }, - "lastchg": { - "type": "long" - }, - "downtime": { - "type": "long" - }, - "qlimit": { - "type": "long" - }, - "pid": { - "type": "integer" - }, - "iid": { - "type": "integer" - }, - "throttle": { - "type": "integer" - }, - "lbtot": { - "type": "long" - }, - "tracked": { - "type": "integer" - }, - "component_type": { - "type": "integer" - }, - - "rate": { - "properties": { - "value": { - "type": "long" - }, - "lim": { - "type": "long" - }, - "max": { - "type": "long" + "properties": { + "pxname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "svname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "qcur": { + "type": "long" + }, + "qmax": { + "type": "long" + }, + "scur": { + "type": "long" + }, + "smax": { + "type": "long" + }, + "slim": { + "type": "long" + }, + "stot": { + "type": "long" + }, + "bin": { + "type": "long" + }, + "bout": { + "type": "long" + }, + "breq": { + "type": "long" + }, + "dresp": { + "type": "long" + }, + "ereq": { + "type": "long" + }, + "econ": { + "type": "long" + }, + "eresp": { + "type": "long" + }, + "wretr": { + "type": "long" + }, + "wredis": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "weight": { + "type": "long" + }, + "act": { + "type": "long" + }, + "bck": { + "type": "long" + }, + "chkfail": { + "type": "long" + }, + "chkdown": { + "type": "long" + }, + "lastchg": { + "type": "long" + }, + "downtime": { + "type": "long" + }, + "qlimit": { + "type": "long" + }, + "pid": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "throttle": { + "type": "integer" + }, + "lbtot": { + "type": "long" + }, + "tracked": { + "type": "integer" + }, + "component_type": { + "type": "integer" + }, + "rate": { + "properties": { + "value": { + "type": "long" + }, + "lim": { + "type": "long" + }, + "max": { + "type": "long" + } } - } - }, - - "check": { - "properties": { - "stats": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "code": { - "type": "integer" - }, - "duration": { - "type": "long" + }, + "check": { + "properties": { + "stats": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "code": { + "type": "integer" + }, + "duration": { + "type": "long" + } } - } - }, - - "hrsp": { - "properties": { - "1xx": { - "type": "long" - }, - "2xx": { - "type": "long" - }, - "3xx": { - "type": "long" - }, - "4xx": { - "type": "long" - }, - "5xx": { - "type": "long" - }, - "other": { - "type": "long" + }, + "hrsp": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + }, + "other": { + "type": "long" + } } - } - }, - - "hanafail": { - "type": "long" - }, - - "req": { - "properties": { - "rate": { - "properties": { - "value": { - "type": "long" - }, - "max": { - "type": "long" + }, + "hanafail": { + "type": "long" + }, + "req": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "max": { + "type": "long" + } } + }, + "tot": { + "type": "long" } - }, - "tot": { - "type": "long" } - } - }, - - - "cli_abrt": { - "type": "long" - }, - "srv_abrt": { - "type": "long" - }, - - "comp": { - "properties": { - 
"in": { - "type": "long" - }, - "out": { - "type": "long" - }, - "byp": { - "type": "long" - }, - "rsp": { - "type": "long" + }, + "cli_abrt": { + "type": "long" + }, + "srv_abrt": { + "type": "long" + }, + "comp": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "byp": { + "type": "long" + }, + "rsp": { + "type": "long" + } } - } - }, - - "last": { - "properties": { - "sess": { - "type": "long" - }, - "chk": { - "type": "long" - }, - "agt": { - "type": "long" + }, + "last": { + "properties": { + "sess": { + "type": "long" + }, + "chk": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "agt": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } } + }, + "qtime": { + "type": "long" + }, + "ctime": { + "type": "long" + }, + "rtime": { + "type": "long" + }, + "ttime": { + "type": "long" } - }, - - "qtime": { - "type": "long" - }, - "ctime": { - "type": "long" - }, - "rtime": { - "type": "long" - }, - "ttime": { - "type": "long" } } } diff --git a/metricbeat/metricbeat.template.json b/metricbeat/metricbeat.template.json index ed7473525c1..94e6c1df071 100644 --- a/metricbeat/metricbeat.template.json +++ b/metricbeat/metricbeat.template.json @@ -230,234 +230,230 @@ } }, "stat": { - "pxname": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "svname": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "qcur": { - "type": "long" - }, - "qmax": { - "type": "long" - }, - "scur": { - "type": "long" - }, - "smax": { - "type": "long" - }, - "slim": { - "type": "long" - }, - "stot": { - "type": "long" - }, - "bin": { - "type": "long" - }, - "bout": { - "type": "long" - }, - "breq": { - "type": "long" - }, - "dresp": { - "type": "long" - }, - "ereq": { - "type": "long" - }, - "econ": { - "type": "long" - }, - "eresp": { - "type": "long" - }, - "wretr": { - "type": "long" - }, - "wredis": { - "type": "long" - }, - "status": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "weight": { - "type": "long" - }, - "act": { - "type": "long" - }, - "bck": { - "type": "long" - }, - "chkfail": { - "type": "long" - }, - "chkdown": { - "type": "long" - }, - "lastchg": { - "type": "long" - }, - "downtime": { - "type": "long" - }, - "qlimit": { - "type": "long" - }, - "pid": { - "type": "integer" - }, - "iid": { - "type": "integer" - }, - "throttle": { - "type": "integer" - }, - "lbtot": { - "type": "long" - }, - "tracked": { - "type": "integer" - }, - "component_type": { - "type": "integer" - }, - - "rate": { - "properties": { - "value": { - "type": "long" - }, - "lim": { - "type": "long" - }, - "max": { - "type": "long" + "properties": { + "pxname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "svname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "qcur": { + "type": "long" + }, + "qmax": { + "type": "long" + }, + "scur": { + "type": "long" + }, + "smax": { + "type": "long" + }, + "slim": { + "type": "long" + }, + "stot": { + "type": "long" + }, + "bin": { + "type": "long" + }, + "bout": { + "type": "long" + }, + "breq": { + "type": "long" + }, + "dresp": { + "type": "long" + }, + "ereq": { + "type": "long" + }, + "econ": { + "type": "long" + }, + "eresp": { + "type": "long" + }, + "wretr": { + "type": "long" + }, + "wredis": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "weight": { + "type": "long" + }, + 
"act": { + "type": "long" + }, + "bck": { + "type": "long" + }, + "chkfail": { + "type": "long" + }, + "chkdown": { + "type": "long" + }, + "lastchg": { + "type": "long" + }, + "downtime": { + "type": "long" + }, + "qlimit": { + "type": "long" + }, + "pid": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "throttle": { + "type": "integer" + }, + "lbtot": { + "type": "long" + }, + "tracked": { + "type": "integer" + }, + "component_type": { + "type": "integer" + }, + "rate": { + "properties": { + "value": { + "type": "long" + }, + "lim": { + "type": "long" + }, + "max": { + "type": "long" + } } - } - }, - - "check": { - "properties": { - "stats": { - "ignore_above": 1024, - "index": "not_analyzed", - "type": "string" - }, - "code": { - "type": "integer" - }, - "duration": { - "type": "long" + }, + "check": { + "properties": { + "stats": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "code": { + "type": "integer" + }, + "duration": { + "type": "long" + } } - } - }, - - "hrsp": { - "properties": { - "1xx": { - "type": "long" - }, - "2xx": { - "type": "long" - }, - "3xx": { - "type": "long" - }, - "4xx": { - "type": "long" - }, - "5xx": { - "type": "long" - }, - "other": { - "type": "long" + }, + "hrsp": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + }, + "other": { + "type": "long" + } } - } - }, - - "hanafail": { - "type": "long" - }, - - "req": { - "properties": { - "rate": { - "properties": { - "value": { - "type": "long" - }, - "max": { - "type": "long" + }, + "hanafail": { + "type": "long" + }, + "req": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "max": { + "type": "long" + } } + }, + "tot": { + "type": "long" } - }, - "tot": { - "type": "long" } - } - }, - - - "cli_abrt": { - "type": "long" - }, - "srv_abrt": { - "type": "long" - }, - - "comp": { - "properties": { - "in": { - "type": "long" - }, - "out": { - "type": "long" - }, - "byp": { - "type": "long" - }, - "rsp": { - "type": "long" + }, + "cli_abrt": { + "type": "long" + }, + "srv_abrt": { + "type": "long" + }, + "comp": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "byp": { + "type": "long" + }, + "rsp": { + "type": "long" + } } - } - }, - - "last": { - "properties": { - "sess": { - "type": "long" - }, - "chk": { - "type": "long" - }, - "agt": { - "type": "long" + }, + "last": { + "properties": { + "sess": { + "type": "long" + }, + "chk": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "agt": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } } + }, + "qtime": { + "type": "long" + }, + "ctime": { + "type": "long" + }, + "rtime": { + "type": "long" + }, + "ttime": { + "type": "long" } - }, - - "qtime": { - "type": "long" - }, - "ctime": { - "type": "long" - }, - "rtime": { - "type": "long" - }, - "ttime": { - "type": "long" } } } diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 9338cc1f2cc..b334cda8b62 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -39,11 +39,22 @@ metricbeat.modules: # Per process stats - process - enabled: true + enabled: false period: 10s processes: ['.*'] +- module: haproxy + metricsets: + - "stat" + - "info" + enabled: true + period: 10s + # The address could also be in the form of a unix socket if the metricbeat process is running locally 
"unix:///var/run/haproxy-stats.sock" + stats_addr: "tcp://127.0.0.1:14567" + + + #================================ General ===================================== # The name of the shipper that publishes the network data. It can be used to group diff --git a/metricbeat/module/haproxy/stat/_meta/fields.yml b/metricbeat/module/haproxy/stat/_meta/fields.yml index 224d48a9ac3..c309b879ce3 100644 --- a/metricbeat/module/haproxy/stat/_meta/fields.yml +++ b/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -367,9 +367,9 @@ last health check contents or textual error - name: agt - type: integer + type: string description: > - llast agent check contents or textual error + last agent check contents or textual error - name: qtime diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go index 65ce7b3b6f6..ae4bb7da2c5 100644 --- a/metricbeat/module/haproxy/stat/data.go +++ b/metricbeat/module/haproxy/stat/data.go @@ -89,7 +89,7 @@ var ( "last": s.Object{ "sess": c.Int("LastSess"), "chk": c.Str("LastChk"), - "agt": c.Int("LastAgt"), + "agt": c.Str("LastAgt"), }, "qtime": c.Int("Qtime"), From 6ba4fbd9886376fceb619f7bc830d59169a9043e Mon Sep 17 00:00:00 2001 From: Al Lefebvre Date: Mon, 5 Sep 2016 08:58:41 -0400 Subject: [PATCH 18/19] Removed unecessary Sprintf & added experimental warning --- metricbeat/module/haproxy/info/info.go | 2 ++ metricbeat/module/haproxy/stat/stat.go | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go index 9fb7601f748..0801ab78a17 100644 --- a/metricbeat/module/haproxy/info/info.go +++ b/metricbeat/module/haproxy/info/info.go @@ -41,6 +41,8 @@ type MetricSet struct { // configuration entries if needed. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + logp.Warn("EXPERIMENTAL: The haproxy info metricset is experimental") + config := struct { StatsAddr string `config:"stats_addr"` }{ diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go index 226553cfbbe..d350a7eec2d 100644 --- a/metricbeat/module/haproxy/stat/stat.go +++ b/metricbeat/module/haproxy/stat/stat.go @@ -9,7 +9,7 @@ import ( ) const ( - // defaultSocket is the default path to the unix socket tfor stats on haproxy. + // defaultSocket is the default path to the unix socket for stats on haproxy. statsMethod = "stat" defaultAddr = "unix:///var/lib/haproxy/stats" ) @@ -41,6 +41,8 @@ type MetricSet struct { // configuration entries if needed. 
From e768cd21e75e428b4bced755e2481ed293b98267 Mon Sep 17 00:00:00 2001
From: Al Lefebvre
Date: Wed, 7 Sep 2016 18:19:17 -0400
Subject: [PATCH 19/19] Restored original version of file

---
 metricbeat/etc/beat.yml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/metricbeat/etc/beat.yml b/metricbeat/etc/beat.yml
index 081c579b03b..984356ef6ac 100644
--- a/metricbeat/etc/beat.yml
+++ b/metricbeat/etc/beat.yml
@@ -43,6 +43,4 @@ metricbeat.modules:
   period: 10s
   processes: ['.*']
 
-  # if true, exports the CPU usage in ticks, together with the percentage values
-  cpu_ticks: false
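For context on what the stat metricset consumes: HAProxy's stats socket answers a newline-terminated "show stat" command with CSV, then closes the connection. A standalone sketch of that exchange follows, assuming the socket lives at the path from defaultAddr above ("/var/lib/haproxy/stats" once the unix:// scheme is stripped); adjust it to the "stats socket" line in your haproxy.cfg.

[source,go]
----
package main

import (
	"fmt"
	"io/ioutil"
	"net"
)

func main() {
	// Path assumed from defaultAddr above; match your haproxy.cfg.
	conn, err := net.Dial("unix", "/var/lib/haproxy/stats")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// HAProxy reads one command, replies, then closes the stream,
	// so ReadAll terminates on its own.
	if _, err := conn.Write([]byte("show stat\n")); err != nil {
		panic(err)
	}
	csv, err := ioutil.ReadAll(conn)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", csv) // header line starts with "# pxname,svname,qcur,..."
}
----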