diff --git a/metricbeat/.gitignore b/metricbeat/.gitignore index 5a4866e0638b..5dcd9d08b70d 100644 --- a/metricbeat/.gitignore +++ b/metricbeat/.gitignore @@ -2,4 +2,4 @@ build /metricbeat /metricbeat.test -/docs/html_docs +/docs/html_docs \ No newline at end of file diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml index 50c701be7778..449667577c3d 100644 --- a/metricbeat/docker-compose.yml +++ b/metricbeat/docker-compose.yml @@ -9,6 +9,7 @@ beat: - postgresql - redis - zookeeper + - haproxy environment: - APACHE_HOST=apache - APACHE_PORT=80 @@ -28,6 +29,8 @@ beat: - POSTGRESQL_PORT=5432 - ZOOKEEPER_HOST=zookeeper - ZOOKEEPER_PORT=2181 + - HAPROXY_HOST=haproxy + - HAPROXY_PORT=14567 - TEST_ENVIRONMENT=false working_dir: /go/src/github.com/elastic/beats/metricbeat volumes: @@ -58,6 +61,9 @@ mysql: nginx: build: ${PWD}/module/nginx/_meta +haproxy: + build: ${PWD}/module/haproxy/_meta + postgresql: image: postgres:9.5.3 diff --git a/metricbeat/docker-entrypoint.sh b/metricbeat/docker-entrypoint.sh index 1aa5e91b20b4..4bc5cc2e22ba 100755 --- a/metricbeat/docker-entrypoint.sh +++ b/metricbeat/docker-entrypoint.sh @@ -29,4 +29,5 @@ waitFor ${NGINX_HOST} ${NGINX_PORT} Nginx waitFor ${POSTGRESQL_HOST} ${POSTGRESQL_PORT} Postgresql waitFor ${REDIS_HOST} ${REDIS_PORT} Redis waitFor ${ZOOKEEPER_HOST} ${ZOOKEEPER_PORT} Zookeeper +waitFor ${HAPROXY_HOST} ${HAPROXY_PORT} HAProxy exec "$@" diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index b8116fbef1de..07fad0bb85d5 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -16,6 +16,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -699,6 +700,34 @@ required: True The document type. Always set to "metricsets". +[[exported-fields-haproxy]] +== haproxy Fields + +haproxy Module + + + +[float] +== haproxy Fields + + + + +[float] +== stat Fields + +stat + + + +[float] +=== haproxy.stat.example + +type: keyword + +Example field + + [[exported-fields-mongodb]] == MongoDB Fields diff --git a/metricbeat/docs/modules/haproxy.asciidoc b/metricbeat/docs/modules/haproxy.asciidoc new file mode 100644 index 000000000000..f5b6fb26885e --- /dev/null +++ b/metricbeat/docs/modules/haproxy.asciidoc @@ -0,0 +1,37 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-module-haproxy]] +== haproxy Module + +This is the haproxy Module. + + + +[float] +=== Example Configuration + +The haproxy module supports the standard configuration options that are described +in <>. Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: haproxy + metricsets: ["stat"] + enabled: true + period: 1s + hosts: ["localhost"] + +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +include::haproxy/stat.asciidoc[] + diff --git a/metricbeat/docs/modules/haproxy/stat.asciidoc b/metricbeat/docs/modules/haproxy/stat.asciidoc new file mode 100644 index 000000000000..a6ff8f587e1d --- /dev/null +++ b/metricbeat/docs/modules/haproxy/stat.asciidoc @@ -0,0 +1,19 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[metricbeat-metricset-haproxy-stat]] +include::../../../module/haproxy/stat/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. 
+ +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/haproxy/stat/_meta/data.json[] +---- diff --git a/metricbeat/etc/beat.full.yml b/metricbeat/etc/beat.full.yml index dc8cdc778ddd..d859db2e7493 100644 --- a/metricbeat/etc/beat.full.yml +++ b/metricbeat/etc/beat.full.yml @@ -67,6 +67,15 @@ metricbeat.modules: # Password of hosts. Empty by default #password: test123 +#------------------------------- haproxy Module ------------------------------ +#- module: haproxy + #metricsets: + #- "stat" + #- "info" + #enabled: true + #period: 10s + #stats_addr: "tcp://127.0.0.1:14567" + #-------------------------------- beats Module ------------------------------- #- module: beats #metricsets: ["filebeat", "libbeat"] diff --git a/metricbeat/etc/fields.yml b/metricbeat/etc/fields.yml index 0818ff2d0f78..8e3e3560c246 100644 --- a/metricbeat/etc/fields.yml +++ b/metricbeat/etc/fields.yml @@ -208,6 +208,554 @@ type: long description: > Total. + +- key: haproxy + title: "haproxy" + description: > + haproxy Module + fields: + - name: haproxy + type: group + description: > + fields: + - name: stat + type: group + description: > + Results from haproxy stat + fields: + - name: pxname + type: string + description: > + proxy name + + - name: svname + type: string + description: > + service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener) + + - name: qcur + type: integer + description: > + current queued requests. For the backend this reports the number queued without a server assigned + + - name: qmax + type: integer + description: > + max value of qcur + + - name: scur + type: integer + description: > + current sessions + + - name: smax + type: integer + description: > + max sessions + + - name: slim + type: integer + description: > + configured session limit + + - name: stot + type: string + description: > + cumulative number of connections + + - name: bin + type: integer + description: > + bytes in + + - name: bout + type: integer + description: > + bytes out + + - name: dreq + type: integer + description: > + requests denied because of security concerns. + * For tcp this is because of a matched tcp-request content rule. + * For http this is because of a matched http-request or tarpit rule. + + - name: dresp + type: integer + description: > + responses denied because of security concerns. + * For http this is because of a matched http-request rule, or "option checkcache". + + - name: ereq + type: integer + description: > + request errors. Some of the possible causes are: + * early termination from the client, before the request has been sent. + * read error from the client + * client timeout + * client closed connection + * various bad requests from the client. + * request was tarpitted. + + - name: econ + type: integer + description: > + number of requests that encountered an error trying to + connect to a backend server. The backend stat is the sum of the stat + for all servers of that backend, plus any connection errors not + associated with a particular server (such as the backend having no + active servers). + + - name: eresp + type: integer + description: > + response errors. srv_abrt will be counted here also. + Some other errors are: + * write error on the client socket (won't be counted for the server stat) + * failure applying filters to the response. + + - name: wretr + type: integer + description: > + number of times a connection to a server was retried. 
+ + - name: wredis + type: integer + description: > + number of times a request was redispatched to another + server. The server value counts the number of times that server was + switched away from. + + - name: status + type: string + description: > + status (UP/DOWN/NOLB/MAINT/MAINT(via)...) + + - name: weight + type: integer + description: > + total weight (backend), server weight (server) + + - name: act + type: integer + description: > + number of active servers (backend), server is active (server) + + - name: bck + type: integer + description: > + number of backup servers (backend), server is backup (server) + + - name: chkfail + type: integer + description: > + number of failed checks. (Only counts checks failed when + the server is up.) + + - name: chkdown + type: integer + description: > + number of UP->DOWN transitions. The backend counter counts + transitions to the whole backend being down, rather than the sum of the + counters for each server. + + - name: lastchg + type: integer + description: > + number of seconds since the last UP<->DOWN transition + + - name: downtime + type: integer + description: > + total downtime (in seconds). The value for the backend + is the downtime for the whole backend, not the sum of the server downtime. + + - name: qlimit + type: integer + description: > + configured maxqueue for the server, or nothing in the + value is 0 (default, meaning no limit) + + - name: pid + type: integer + description: > + process id (0 for first instance, 1 for second, ...) + + - name: iid + type: integer + description: > + unique proxy id + + - name: sid + type: integer + description: > + server id (unique inside a proxy) + + - name: throttle + type: integer + description: > + current throttle percentage for the server, when + slowstart is active, or no value if not in slowstart. + + - name: lbtot + type: integer + description: > + total number of times a server was selected, either for new + sessions, or when re-dispatching. The server counter is the number + of times that server was selected. + + - name: tracked + type: integer + description: > + id of proxy/server if tracking is enabled. 
+ + - name: type + type: integer + description: > + (0=frontend, 1=backend, 2=server, 3=socket/listener) + + - name: rate + type: integer + description: > + number of sessions per second over last elapsed second + + - name: rate_lim + type: integer + description: > + configured limit on new sessions per second + + - name: rate_max + type: integer + description: > + max number of new sessions per second + + - name: check_status + type: string + description: > + status of last health check, one of: + UNK -> unknown + INI -> initializing + SOCKERR -> socket error + L4OK -> check passed on layer 4, no upper layers testing enabled + L4TOUT -> layer 1-4 timeout + L4CON -> layer 1-4 connection problem, for example + "Connection refused" (tcp rst) or "No route to host" (icmp) + L6OK -> check passed on layer 6 + L6TOUT -> layer 6 (SSL) timeout + L6RSP -> layer 6 invalid response - protocol error + L7OK -> check passed on layer 7 + L7OKC -> check conditionally passed on layer 7, for example 404 with + disable-on-404 + L7TOUT -> layer 7 (HTTP/SMTP) timeout + L7RSP -> layer 7 invalid response - protocol error + L7STS -> layer 7 response error, for example HTTP 5xx + + - name: check_code + type: integer + description: > + layer5-7 code, if available + + - name: check_duration + type: integer + description: > + time in ms took to finish last health check + + - name: hrsp_1xx + type: integer + description: > + http responses with 1xx code + + - name: hrsp_2xx + type: integer + description: > + http responses with 2xx code + + - name: hrsp_3xx + type: integer + description: > + http responses with 3xx code + + - name: hrsp_4xx + type: integer + description: > + http responses with 4xx code + + - name: hrsp_5xx + type: integer + description: > + http responses with 5xx code + + - name: hrsp_other + type: integer + description: > + http responses with other codes (protocol error) + + - name: hanafail + type: integer + description: > + failed health checks details + + - name: req_rate + type: integer + description: > + HTTP requests per second over last elapsed second + + - name: req_rate_max + type: integer + description: > + max number of HTTP requests per second observed + + - name: req_tot + type: integer + description: > + total number of HTTP requests received + + - name: cli_abrt + type: integer + description: > + number of data transfers aborted by the client + + - name: srv_abrt + type: integer + description: > + number of data transfers aborted by the server (inc. 
in eresp) + + - name: comp_in + type: integer + description: > + number of HTTP response bytes fed to the compressor + + - name: comp_out + type: integer + description: > + number of HTTP response bytes emitted by the compressor + + - name: comp_byp + type: integer + description: > + number of bytes that bypassed the HTTP compressor (CPU/BW limit) + + - name: comp_rsp + type: integer + description: > + number of HTTP responses that were compressed + + - name: lastsess + type: integer + description: > + number of seconds since last session assigned to server/backend + + - name: last_chk + type: string + description: > + last health check contents or textual error + + - name: last_agt + type: string + description: > + last agent check contents or textual error + + - name: qtime + type: integer + description: > + the average queue time in ms over the 1024 last requests + + - name: ctime + type: integer + description: > + the average connect time in ms over the 1024 last requests + + - name: rtime + type: integer + description: > + the average response time in ms over the 1024 last requests (0 for TCP) + + - name: ttime + type: integer + description: > + the average total session time in ms over the 1024 last requests + + - name: info + type: group + description: > + Results from haproxy info + fields: + - name: nb_proc + type: integer + description: > + Number of processes + + - name: process_num + type: integer + description: > + Process number + + - name: pid + type: integer + description: > + Process ID + + - name: uptime_sec + type: integer + description: > + Current uptime in seconds + + - name: mem_max_bytes + type: integer + format: bytes + description: > + Maximum memory usage in bytes (the 'Memmax_MB' value converted to bytes) + + - name: ulimit_n + type: integer + description: > + Max number of open files for process + + - name: max_sock + type: integer + description: > + + - name: max_conn + type: integer + description: > + + - name: hard_max_conn + type: integer + description: > + + - name: curr_conns + type: integer + description: > + + - name: cum_conns + type: integer + description: > + + - name: cum_req + type: integer + description: > + + - name: max_ssl_conns + type: integer + description: > + + - name: curr_ssl_conns + type: integer + description: > + + - name: cum_ssl_conns + type: integer + description: > + + - name: max_pipes + type: integer + description: > + + - name: pipes_used + type: integer + description: > + + - name: pipes_free + type: integer + description: > + + - name: conn_rate + type: integer + description: > + + - name: conn_rate_limit + type: integer + description: > + + - name: max_conn_rate + type: integer + description: > + + - name: sess_rate + type: integer + description: > + + - name: sess_rate_limit + type: integer + description: > + + - name: max_sess_rate + type: integer + description: > + + - name: ssl_rate + type: integer + description: > + + - name: ssl_rate_limit + type: integer + description: > + + - name: max_ssl_rate + type: integer + description: > + + - name: ssl_frontend_key_rate + type: integer + description: > + + - name: ssl_frontend_max_key_rate + type: integer + description: > + + - name: ssl_frontend_session_reuse_pct + type: integer + description: > + + - name: ssl_backend_key_rate + type: integer + description: > + + - name: ssl_backend_max_key_rate + type: integer + description: > + + - name: ssl_cached_lookups + type: integer + description: > + + - name: ssl_cache_misses + type: integer + description: > + + - name: compress_bps_in + type: integer + description: > + + - name: compress_bps_out + type: integer + description: > + + - name: compress_bps_rate_limit + type: integer + description: > + + - name: zlib_mem_usage + type: integer + description: > + + - name: max_zlib_mem_usage + type: integer + description: > + + - name: tasks + type: integer + description: > + + - name: run_queue + type: integer + description: > + + - name: idle_pct + type: scaled_float + format: percent + description: > + - key: beats title: "beats" description: > @@ -374,6 +922,7 @@ type: long description: > Total number of events published + - key: mongodb title: "MongoDB" description: > diff --git a/metricbeat/include/list.go b/metricbeat/include/list.go index e8b7892ae964..2ba265d37c37 100644 --- a/metricbeat/include/list.go +++ b/metricbeat/include/list.go @@ -13,6 +13,9 @@ import ( _ "github.com/elastic/beats/metricbeat/module/beats" _ "github.com/elastic/beats/metricbeat/module/beats/filebeat" _ "github.com/elastic/beats/metricbeat/module/beats/libbeat" + _ "github.com/elastic/beats/metricbeat/module/haproxy" + _ "github.com/elastic/beats/metricbeat/module/haproxy/info" + _ "github.com/elastic/beats/metricbeat/module/haproxy/stat" _ "github.com/elastic/beats/metricbeat/module/mongodb" _ "github.com/elastic/beats/metricbeat/module/mongodb/status" _ "github.com/elastic/beats/metricbeat/module/mysql" diff --git a/metricbeat/metricbeat.full.yml b/metricbeat/metricbeat.full.yml index 2a0b2347320e..a41afb2e1c02 100644 --- a/metricbeat/metricbeat.full.yml +++ b/metricbeat/metricbeat.full.yml @@ -67,6 +67,16 @@ metricbeat.modules: # Password of hosts. 
Empty by default #password: test123 +#------------------------------- haproxy Module ------------------------------ +#- module: haproxy + #metricsets: + #- "stat" + #- "info" + #enabled: true + #period: 10s + # The address could also be in the form of a unix socket if the metricbeat process is running locally "unix:///var/run/haproxy-stats.sock" + #stats_addr: "tcp://127.0.0.1:14567" + #-------------------------------- beats Module ------------------------------- #- module: beats #metricsets: ["filebeat", "libbeat"] @@ -77,7 +87,6 @@ metricbeat.modules: # Path to httpprof variables #vars_path: /debug/vars - #------------------------------- MongoDB Module ------------------------------ #- module: mongodb #metricsets: ["status"] diff --git a/metricbeat/metricbeat.template-es2x.json b/metricbeat/metricbeat.template-es2x.json index 9c88b144f259..73e3335f2cb6 100644 --- a/metricbeat/metricbeat.template-es2x.json +++ b/metricbeat/metricbeat.template-es2x.json @@ -82,10 +82,10 @@ "1": { "type": "float" }, - "15": { + "5": { "type": "float" }, - "5": { + "15": { "type": "float" } } @@ -298,6 +298,444 @@ } } }, + "haproxy": { + "properties": { + "info": { + "properties": { + + "nb_proc": { + "type": "long" + }, + "process_num": { + "type": "long" + }, + "pid": { + "type": "integer" + }, + "uptime_sec": { + "type": "long" + }, + "bytes": { + "type": "long" + }, + "ulimit_n": { + "type": "integer" + }, + + "compress": { + "properties": { + "bps": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "rate_limit": { + "type": "long" + } + } + } + } + }, + + "max": { + "properties": { + "sock": { + "type": "long" + }, + "conn": { + "properties": { + "value": { + "type": "integer" + }, + "rate": { + "type": "long" + } + } + }, + "hard_conn": { + "type": "integer" + }, + "sess_rate": { + "type": "long" + }, + "ssl_rate": { + "type": "long" + }, + "zlib_mem_usage": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + }, + "pipes": { + "type": "long" + } + } + }, + + "curr": { + "properties": { + "conns": { + "type": "integer" + }, + "ssl_conns": { + "type": "integer" + } + } + }, + + + "cum": { + "properties": { + "conns": { + "type": "integer" + }, + "req": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + } + } + }, + + "pipes": { + "properties": { + "used": { + "type": "long" + }, + "free": { + "type": "long" + } + } + }, + + "conn": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "limit": { + "type": "long" + } + } + } + } + }, + + "sess": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "limit": { + "type": "long" + } + } + } + } + }, + + + "ssl": { + "properties": { + + "rate": { + "properties": { + "value": { + "type": "long" + }, + "limit": { + "type": "long" + } + } + }, + + "frontend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + }, + "session_reuse_pct": { + "type": "long" + } + } + }, + + "backend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + } + } + }, + + "cached_lookups": { + "type": "long" + }, + "cache_misses": { + "type": "long" + } + + } + }, + + "zlib_mem_usage": { + "type": "long" + }, + "tasks": { + "type": "long" + }, + "run_queue": { + "type": "long" + }, + "idle_pct": { + "type": "float" + } + } + }, + "stat": { + "properties": { + "pxname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "svname": { + "ignore_above": 
1024, + "index": "not_analyzed", + "type": "string" + }, + "qcur": { + "type": "long" + }, + "qmax": { + "type": "long" + }, + "scur": { + "type": "long" + }, + "smax": { + "type": "long" + }, + "slim": { + "type": "long" + }, + "stot": { + "type": "long" + }, + "bin": { + "type": "long" + }, + "bout": { + "type": "long" + }, + "breq": { + "type": "long" + }, + "dresp": { + "type": "long" + }, + "ereq": { + "type": "long" + }, + "econ": { + "type": "long" + }, + "eresp": { + "type": "long" + }, + "wretr": { + "type": "long" + }, + "wredis": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "weight": { + "type": "long" + }, + "act": { + "type": "long" + }, + "bck": { + "type": "long" + }, + "chkfail": { + "type": "long" + }, + "chkdown": { + "type": "long" + }, + "lastchg": { + "type": "long" + }, + "downtime": { + "type": "long" + }, + "qlimit": { + "type": "long" + }, + "pid": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "throttle": { + "type": "integer" + }, + "lbtot": { + "type": "long" + }, + "tracked": { + "type": "integer" + }, + "component_type": { + "type": "integer" + }, + "rate": { + "properties": { + "value": { + "type": "long" + }, + "lim": { + "type": "long" + }, + "max": { + "type": "long" + } + } + }, + "check": { + "properties": { + "stats": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "code": { + "type": "integer" + }, + "duration": { + "type": "long" + } + } + }, + "hrsp": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + }, + "other": { + "type": "long" + } + } + }, + "hanafail": { + "type": "long" + }, + "req": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "max": { + "type": "long" + } + } + }, + "tot": { + "type": "long" + } + } + }, + "cli_abrt": { + "type": "long" + }, + "srv_abrt": { + "type": "long" + }, + "comp": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "byp": { + "type": "long" + }, + "rsp": { + "type": "long" + } + } + }, + "last": { + "properties": { + "sess": { + "type": "long" + }, + "chk": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "agt": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + }, + "qtime": { + "type": "long" + }, + "ctime": { + "type": "long" + }, + "rtime": { + "type": "long" + }, + "ttime": { + "type": "long" + } + } + } + } + }, "metricset": { "properties": { "host": { @@ -1665,10 +2103,10 @@ "1": { "type": "float" }, - "15": { + "5": { "type": "float" }, - "5": { + "15": { "type": "float" }, "norm": { @@ -1676,10 +2114,10 @@ "1": { "type": "float" }, - "15": { + "5": { "type": "float" }, - "5": { + "15": { "type": "float" } } diff --git a/metricbeat/metricbeat.template.json b/metricbeat/metricbeat.template.json index 5d858f097faa..94e6c1df0715 100644 --- a/metricbeat/metricbeat.template.json +++ b/metricbeat/metricbeat.template.json @@ -20,6 +20,444 @@ "@timestamp": { "type": "date" }, + "haproxy": { + "properties": { + "info": { + "properties": { + + "nb_proc": { + "type": "long" + }, + "process_num": { + "type": "long" + }, + "pid": { + "type": "integer" + }, + "uptime_sec": { + "type": "long" + }, + "bytes": { + "type": "long" + }, + "ulimit_n": { + "type": "integer" + }, + + "compress": { + "properties": { + "bps": { + "properties": { + 
"in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "rate_limit": { + "type": "long" + } + } + } + } + }, + + "max": { + "properties": { + "sock": { + "type": "long" + }, + "conn": { + "properties": { + "value": { + "type": "integer" + }, + "rate": { + "type": "long" + } + } + }, + "hard_conn": { + "type": "integer" + }, + "sess_rate": { + "type": "long" + }, + "ssl_rate": { + "type": "long" + }, + "zlib_mem_usage": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + }, + "pipes": { + "type": "long" + } + } + }, + + "curr": { + "properties": { + "conns": { + "type": "integer" + }, + "ssl_conns": { + "type": "integer" + } + } + }, + + + "cum": { + "properties": { + "conns": { + "type": "integer" + }, + "req": { + "type": "long" + }, + "ssl_conns": { + "type": "long" + } + } + }, + + "pipes": { + "properties": { + "used": { + "type": "long" + }, + "free": { + "type": "long" + } + } + }, + + "conn": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "limit": { + "type": "long" + } + } + } + } + }, + + "sess": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "limit": { + "type": "long" + } + } + } + } + }, + + + "ssl": { + "properties": { + + "rate": { + "properties": { + "value": { + "type": "long" + }, + "limit": { + "type": "long" + } + } + }, + + "frontend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + }, + "session_reuse_pct": { + "type": "long" + } + } + }, + + "backend": { + "properties": { + "key_rate": { + "type": "long" + }, + "max_key_rate": { + "type": "long" + } + } + }, + + "cached_lookups": { + "type": "long" + }, + "cache_misses": { + "type": "long" + } + + } + }, + + "zlib_mem_usage": { + "type": "long" + }, + "tasks": { + "type": "long" + }, + "run_queue": { + "type": "long" + }, + "idle_pct": { + "type": "float" + } + } + }, + "stat": { + "properties": { + "pxname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "svname": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "qcur": { + "type": "long" + }, + "qmax": { + "type": "long" + }, + "scur": { + "type": "long" + }, + "smax": { + "type": "long" + }, + "slim": { + "type": "long" + }, + "stot": { + "type": "long" + }, + "bin": { + "type": "long" + }, + "bout": { + "type": "long" + }, + "breq": { + "type": "long" + }, + "dresp": { + "type": "long" + }, + "ereq": { + "type": "long" + }, + "econ": { + "type": "long" + }, + "eresp": { + "type": "long" + }, + "wretr": { + "type": "long" + }, + "wredis": { + "type": "long" + }, + "status": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "weight": { + "type": "long" + }, + "act": { + "type": "long" + }, + "bck": { + "type": "long" + }, + "chkfail": { + "type": "long" + }, + "chkdown": { + "type": "long" + }, + "lastchg": { + "type": "long" + }, + "downtime": { + "type": "long" + }, + "qlimit": { + "type": "long" + }, + "pid": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "throttle": { + "type": "integer" + }, + "lbtot": { + "type": "long" + }, + "tracked": { + "type": "integer" + }, + "component_type": { + "type": "integer" + }, + "rate": { + "properties": { + "value": { + "type": "long" + }, + "lim": { + "type": "long" + }, + "max": { + "type": "long" + } + } + }, + "check": { + "properties": { + "stats": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "code": { + "type": "integer" + }, + 
"duration": { + "type": "long" + } + } + }, + "hrsp": { + "properties": { + "1xx": { + "type": "long" + }, + "2xx": { + "type": "long" + }, + "3xx": { + "type": "long" + }, + "4xx": { + "type": "long" + }, + "5xx": { + "type": "long" + }, + "other": { + "type": "long" + } + } + }, + "hanafail": { + "type": "long" + }, + "req": { + "properties": { + "rate": { + "properties": { + "value": { + "type": "long" + }, + "max": { + "type": "long" + } + } + }, + "tot": { + "type": "long" + } + } + }, + "cli_abrt": { + "type": "long" + }, + "srv_abrt": { + "type": "long" + }, + "comp": { + "properties": { + "in": { + "type": "long" + }, + "out": { + "type": "long" + }, + "byp": { + "type": "long" + }, + "rsp": { + "type": "long" + } + } + }, + "last": { + "properties": { + "sess": { + "type": "long" + }, + "chk": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + }, + "agt": { + "ignore_above": 1024, + "index": "not_analyzed", + "type": "string" + } + } + }, + "qtime": { + "type": "long" + }, + "ctime": { + "type": "long" + }, + "rtime": { + "type": "long" + }, + "ttime": { + "type": "long" + } + } + } + } + }, "apache": { "properties": { "status": { @@ -1659,11 +2097,11 @@ "scaling_factor": 100, "type": "scaled_float" }, - "15": { + "5": { "scaling_factor": 100, "type": "scaled_float" }, - "5": { + "15": { "scaling_factor": 100, "type": "scaled_float" }, @@ -1673,11 +2111,11 @@ "scaling_factor": 100, "type": "scaled_float" }, - "15": { + "5": { "scaling_factor": 100, "type": "scaled_float" }, - "5": { + "15": { "scaling_factor": 100, "type": "scaled_float" } diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index 5678d6364ee7..b334cda8b628 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -39,11 +39,21 @@ metricbeat.modules: # Per process stats - process - enabled: true + enabled: false period: 10s processes: ['.*'] +- module: haproxy + metricsets: + - "stat" + - "info" + enabled: true + period: 10s + # The address could also be in the form of a unix socket if the metricbeat process is running locally "unix:///var/run/haproxy-stats.sock" + stats_addr: "tcp://127.0.0.1:14567" + + #================================ General ===================================== diff --git a/metricbeat/module/haproxy/_meta/Dockerfile b/metricbeat/module/haproxy/_meta/Dockerfile new file mode 100644 index 000000000000..4efd57ec6cbf --- /dev/null +++ b/metricbeat/module/haproxy/_meta/Dockerfile @@ -0,0 +1,2 @@ +FROM haproxy:1.6 +COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg \ No newline at end of file diff --git a/metricbeat/module/haproxy/_meta/config.full.yml b/metricbeat/module/haproxy/_meta/config.full.yml new file mode 100644 index 000000000000..e01d0d646c63 --- /dev/null +++ b/metricbeat/module/haproxy/_meta/config.full.yml @@ -0,0 +1,11 @@ +#- module: haproxy + + #metricsets: + #- "stat" + #- "info" + + #enabled: true + + #period: 10s + + #stats_addr: "tcp://127.0.0.1:14567 diff --git a/metricbeat/module/haproxy/_meta/config.yml b/metricbeat/module/haproxy/_meta/config.yml new file mode 100644 index 000000000000..f2589e7ae198 --- /dev/null +++ b/metricbeat/module/haproxy/_meta/config.yml @@ -0,0 +1,7 @@ +- module: haproxy + metricsets: + - "stat" + - "info" + enabled: true + period: 10s + stats_addr: "tcp://127.0.0.1:14567" diff --git a/metricbeat/module/haproxy/_meta/docs.asciidoc b/metricbeat/module/haproxy/_meta/docs.asciidoc new file mode 100644 index 000000000000..42bd58464a04 --- /dev/null +++ 
b/metricbeat/module/haproxy/_meta/docs.asciidoc @@ -0,0 +1,9 @@ +== haproxy Module + +This is the haproxy Module. To collect stats from HAProxy, you must enable the stats socket, for example over TCP. +Placing the following statement in the `global` section of the haproxy config: + +`stats socket 127.0.0.1:14567` + +will enable stats reporting on 127.0.0.1, port 14567. Note that you should bind the socket to a loopback or internal private IP, +or secure it with a firewall rule, so that only designated hosts can access this data. diff --git a/metricbeat/module/haproxy/_meta/fields.yml b/metricbeat/module/haproxy/_meta/fields.yml new file mode 100644 index 000000000000..b3b1127c157b --- /dev/null +++ b/metricbeat/module/haproxy/_meta/fields.yml @@ -0,0 +1,9 @@ +- key: haproxy + title: "haproxy" + description: > + haproxy Module + fields: + - name: haproxy + type: group + description: > + fields: diff --git a/metricbeat/module/haproxy/_meta/haproxy.conf b/metricbeat/module/haproxy/_meta/haproxy.conf new file mode 100644 index 000000000000..ba79afc372f7 --- /dev/null +++ b/metricbeat/module/haproxy/_meta/haproxy.conf @@ -0,0 +1,39 @@ + +global + # maximum number of simultaneous active connections + maxconn 4000 + #daemon + user root + group staff + + # for restarts + pidfile /var/run/haproxy.pid + # Logging to syslog facility local0 + log 127.0.0.1 local0 + stats socket 0.0.0.0:14567 + spread-checks 5 + #debug + +defaults + + log global + mode http + balance roundrobin + maxconn 25000 + option httplog + option abortonclose + option httpclose + option forwardfor + retries 3 + option redispatch + + timeout client 30s + timeout connect 30s + timeout server 30s + option httpchk HEAD /haproxy?monitor HTTP/1.0 + timeout check 5s + +listen http-webservices + + bind 0.0.0.0:8888 + server log1 127.0.0.1:8889 check diff --git a/metricbeat/module/haproxy/doc.go b/metricbeat/module/haproxy/doc.go new file mode 100644 index 000000000000..019acc1f634f --- /dev/null +++ b/metricbeat/module/haproxy/doc.go @@ -0,0 +1,4 @@ +/* +Package haproxy is a Metricbeat module that contains MetricSets. 
+*/ +package haproxy diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go new file mode 100644 index 000000000000..32ab45465fd6 --- /dev/null +++ b/metricbeat/module/haproxy/haproxy.go @@ -0,0 +1,244 @@ +package haproxy + +import ( + "bytes" + "encoding/csv" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "strings" + + "github.com/gocarina/gocsv" + "github.com/mitchellh/mapstructure" +) + +// Stat is an instance of the HAProxy stat information +type Stat struct { + PxName string `csv:"# pxname"` + SvName string `csv:"svname"` + Qcur string `csv:"qcur"` + Qmax string `csv:"qmax"` + Scur string `csv:"scur"` + Smax string `csv:"smax"` + Slim string `csv:"slim"` + Stot string `csv:"stot"` + Bin string `csv:"bin"` + Bout string `csv:"bout"` + Dreq string `csv:"dreq"` + Dresp string `csv:"dresp"` + Ereq string `csv:"ereq"` + Econ string `csv:"econ"` + Eresp string `csv:"eresp"` + Wretr string `csv:"wretr"` + Wredis string `csv:"wredis"` + Status string `csv:"status"` + Weight string `csv:"weight"` + Act string `csv:"act"` + Bck string `csv:"bck"` + ChkFail string `csv:"chkfail"` + ChkDown string `csv:"chkdown"` + Lastchg string `csv:"lastchg"` + Downtime string `csv:"downtime"` + Qlimit string `csv:"qlimit"` + Pid string `csv:"pid"` + Iid string `csv:"iid"` + Sid string `csv:"sid"` + Throttle string `csv:"throttle"` + Lbtot string `csv:"lbtot"` + Tracked string `csv:"tracked"` + Type string `csv:"type"` + Rate string `csv:"rate"` + RateLim string `csv:"rate_lim"` + RateMax string `csv:"rate_max"` + CheckStatus string `csv:"check_status"` + CheckCode string `csv:"check_code"` + CheckDuration string `csv:"check_duration"` + Hrsp1xx string `csv:"hrsp_1xx"` + Hrsp2xx string `csv:"hrsp_2xx"` + Hrsp3xx string `csv:"hrsp_3xx"` + Hrsp4xx string `csv:"hrsp_4xx"` + Hrsp5xx string `csv:"hrsp_5xx"` + HrspOther string `csv:"hrsp_other"` + Hanafail string `csv:"hanafail"` + ReqRate string `csv:"req_rate"` + ReqRateMax string `csv:"req_rate_max"` + ReqTot string `csv:"req_tot"` + CliAbrt string `csv:"cli_abrt"` + SrvAbrt string `csv:"srv_abrt"` + CompIn string `csv:"comp_in"` + CompOut string `csv:"comp_out"` + CompByp string `csv:"comp_byp"` + CompRsp string `csv:"comp_rsp"` + LastSess string `csv:"lastsess"` + LastChk string `csv:"last_chk"` + LastAgt string `csv:"last_agt"` + Qtime string `csv:"qtime"` + Ctime string `csv:"ctime"` + Rtime string `csv:"rtime"` + Ttime string `csv:"ttime"` +} + +type Info struct { + Name string `mapstructure:"Name"` + Version string `mapstructure:"Version"` + ReleaseDate string `mapstructure:"Release_date"` + Nbproc string `mapstructure:"Nbproc"` + ProcessNum string `mapstructure:"Process_num"` + Pid string `mapstructure:"Pid"` + Uptime string `mapstructure:"Uptime"` + UptimeSec string `mapstructure:"Uptime_sec"` + MemMax string `mapstructure:"Memmax_MB"` + UlimitN string `mapstructure:"Ulimit-n"` + Maxsock string `mapstructure:"Maxsock"` + Maxconn string `mapstructure:"Maxconn"` + HardMaxconn string `mapstructure:"Hard_maxconn"` + CurrConns string `mapstructure:"CurrConns"` + CumConns string `mapstructure:"CumConns"` + CumReq string `mapstructure:"CumReq"` + MaxSslConns string `mapstructure:"MaxSslConns"` + CurrSslConns string `mapstructure:"CurrSslConns"` + CumSslConns string `mapstructure:"CumSslConns"` + Maxpipes string `mapstructure:"Maxpipes"` + PipesUsed string `mapstructure:"PipesUsed"` + PipesFree string `mapstructure:"PipesFree"` + ConnRate string `mapstructure:"ConnRate"` + ConnRateLimit string `mapstructure:"ConnRateLimit"` + 
MaxConnRate string `mapstructure:"MaxConnRate"` + SessRate string `mapstructure:"SessRate"` + SessRateLimit string `mapstructure:"SessRateLimit"` + MaxSessRate string `mapstructure:"MaxSessRate"` + SslRate string `mapstructure:"SslRate"` + SslRateLimit string `mapstructure:"SslRateLimit"` + MaxSslRate string `mapstructure:"MaxSslRate"` + SslFrontendKeyRate string `mapstructure:"SslFrontendKeyRate"` + SslFrontendMaxKeyRate string `mapstructure:"SslFrontendMaxKeyRate"` + SslFrontendSessionReusePct string `mapstructure:"SslFrontendSessionReuse_pct"` + SslBackendKeyRate string `mapstructure:"SslBackendKeyRate"` + SslBackendMaxKeyRate string `mapstructure:"SslBackendMaxKeyRate"` + SslCacheLookups string `mapstructure:"SslCacheLookups"` + SslCacheMisses string `mapstructure:"SslCacheMisses"` + CompressBpsIn string `mapstructure:"CompressBpsIn"` + CompressBpsOut string `mapstructure:"CompressBpsOut"` + CompressBpsRateLim string `mapstructure:"CompressBpsRateLim"` + ZlibMemUsage string `mapstructure:"ZlibMemUsage"` + MaxZlibMemUsage string `mapstructure:"MaxZlibMemUsage"` + Tasks string `mapstructure:"Tasks"` + RunQueue string `mapstructure:"Run_queue"` + IdlePct string `mapstructure:"Idle_pct"` + Node string `mapstructure:"Node"` + Description string `mapstructure:"Description"` +} + +// Client is an instance of the HAProxy client +type Client struct { + Address string + ProtoScheme string +} + +// NewHaproxyClient returns a new instance of Client +func NewHaproxyClient(address string) (*Client, error) { + parts := strings.Split(address, "://") + if len(parts) != 2 { + return nil, errors.New("must have protocol scheme and address") + } + + if parts[0] != "tcp" && parts[0] != "unix" { + return nil, errors.New("invalid protocol scheme") + } + + return &Client{ + Address: parts[1], + ProtoScheme: parts[0], + }, nil +} + +// run sends a designated command to the haproxy stats socket +func (c *Client) run(cmd string) (*bytes.Buffer, error) { + var conn net.Conn + response := bytes.NewBuffer(nil) + + conn, err := net.Dial(c.ProtoScheme, c.Address) + if err != nil { + return response, err + } + + defer conn.Close() + + _, err = conn.Write([]byte(cmd + "\n")) + if err != nil { + return response, err + } + + _, err = io.Copy(response, conn) + if err != nil { + return response, err + } + + if strings.HasPrefix(response.String(), "Unknown command") { + return response, fmt.Errorf("unknown command: %s", cmd) + } + + return response, nil +} + +// GetStat returns the result from the 'show stat' command +func (c *Client) GetStat() ([]*Stat, error) { + + runResult, err := c.run("show stat") + if err != nil { + return nil, err + } + + var statRes []*Stat + csvReader := csv.NewReader(runResult) + csvReader.TrailingComma = true + + err = gocsv.UnmarshalCSV(csvReader, &statRes) + if err != nil { + return nil, fmt.Errorf("error parsing CSV: %s", err) + } + + return statRes, nil + +} + +// GetInfo returns the result from the 'show info' command +func (c *Client) GetInfo() (*Info, error) { + + res, err := c.run("show info") + if err != nil { + return nil, err + } + + if b, err := ioutil.ReadAll(res); err == nil { + + resultMap := map[string]interface{}{} + + for _, ln := range strings.Split(string(b), "\n") { + + ln := strings.TrimSpace(ln) + if ln == "" { + continue + } + + parts := strings.Split(ln, ":") + if len(parts) != 2 { + continue + } + + resultMap[parts[0]] = strings.TrimSpace(parts[1]) + } + + var result *Info + + if err := mapstructure.Decode(resultMap, &result); err != nil { + return nil, err + } + 
return result, nil + } + + return nil, err + +} diff --git a/metricbeat/module/haproxy/info/_meta/data.json b/metricbeat/module/haproxy/info/_meta/data.json new file mode 100644 index 000000000000..c21cbc98f2ef --- /dev/null +++ b/metricbeat/module/haproxy/info/_meta/data.json @@ -0,0 +1,59 @@ +{ + "@timestamp": "2016-08-13T19:44:21.036Z", + "beat": { + "hostname": "beathost", + "name": "beathost" + }, + "haproxy": { + "info": { + "compress_bps_in": 0, + "compress_bps_out": 0, + "compress_bps_rate_limit": 0, + "conn_rate": 0, + "conn_rate_limit": 0, + "cum_conns": 67, + "cum_req": 67, + "cum_ssl_conns": 0, + "curr_conns": 0, + "curr_ssl_conns": 0, + "hard_max_conn": 4000, + "idle_pct": 100, + "max_conn": 4000, + "max_conn_rate": 5, + "max_pipes": 0, + "max_sess_rate": 5, + "max_sock": 8033, + "max_ssl_conns": 0, + "max_ssl_rate": 0, + "max_zlib_mem_usage": 0, + "mem_max_mb": 0, + "nb_proc": 1, + "pid": 53858, + "pipes_free": 0, + "pipes_used": 0, + "process_num": 1, + "run_queue": 2, + "sess_rate": 0, + "sess_rate_limit": 0, + "ssl_backend_key_rate": 0, + "ssl_backend_max_key_rate": 0, + "ssl_cache_misses": 0, + "ssl_cached_lookups": 0, + "ssl_frontend_key_rate": 0, + "ssl_frontend_max_key_rate": 0, + "ssl_frontend_session_reuse_pct": 0, + "ssl_rate": 0, + "ssl_rate_limit": 0, + "tasks": 7, + "ulimit_n": 8033, + "uptime_sec": 13700, + "zlib_mem_usage": 0 + } + }, + "metricset": { + "module": "haproxy", + "name": "info", + "rtt": 707 + }, + "type": "metricsets" +} \ No newline at end of file diff --git a/metricbeat/module/haproxy/info/_meta/docs.asciidoc b/metricbeat/module/haproxy/info/_meta/docs.asciidoc new file mode 100644 index 000000000000..6725bc19c531 --- /dev/null +++ b/metricbeat/module/haproxy/info/_meta/docs.asciidoc @@ -0,0 +1,3 @@ +=== haproxy info MetricSet + +This is the info metricset of the module haproxy. 
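A quick usage note for reviewers: the client API introduced in haproxy.go above can be exercised on its own, outside of Metricbeat. The following is a minimal sketch, not part of the change itself; it assumes the import path used in this PR and an HAProxy instance configured with `stats socket 127.0.0.1:14567`. The field names come from the Info struct above, and all values are raw strings as returned by HAProxy.

[source,go]
----
package main

import (
	"fmt"
	"log"

	"github.com/elastic/beats/metricbeat/module/haproxy"
)

func main() {
	// Assumes HAProxy is running with: stats socket 127.0.0.1:14567
	client, err := haproxy.NewHaproxyClient("tcp://127.0.0.1:14567")
	if err != nil {
		log.Fatal(err)
	}

	// GetInfo issues "show info" and returns the parsed key/value pairs.
	info, err := client.GetInfo()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("uptime(s):", info.UptimeSec, "current conns:", info.CurrConns)
}
----

The same pattern should work with a unix socket address such as `unix:///var/run/haproxy-stats.sock`, per the comment in the example configuration.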
diff --git a/metricbeat/module/haproxy/info/_meta/fields.yml b/metricbeat/module/haproxy/info/_meta/fields.yml new file mode 100644 index 000000000000..4eeea5bfb029 --- /dev/null +++ b/metricbeat/module/haproxy/info/_meta/fields.yml @@ -0,0 +1,252 @@ +- name: info + type: group + description: > + General information collected on HAProxy process + fields: + - name: nb_proc + type: integer + description: > + Number of processes + + - name: process_num + type: integer + description: > + Process number + + - name: pid + type: integer + description: > + Process ID + + - name: uptime_sec + type: integer + description: > + Current uptime in seconds + + - name: mem_max_bytes + type: integer + format: bytes + description: > + Maximum memory usage in bytes (the 'Memmax_MB' value converted to bytes) + + - name: ulimit_n + type: integer + description: > + Max number of open files for process + + - name: compress + type: group + description: > + + fields: + - name: bps + type: group + description: > + + fields: + - name: in + type: integer + description: > + + - name: out + type: integer + description: > + + - name: rate_limit + type: integer + description: > + + - name: conn + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + + - name: limit + type: integer + description: > + + - name: curr + type: group + description: > + + fields: + - name: conns + type: integer + description: > + + - name: ssl_conns + type: integer + description: > + + - name: cum + type: group + description: > + + fields: + - name: conns + type: integer + description: > + + - name: req + type: integer + description: > + + - name: ssl_conns + type: integer + description: > + + + - name: max + type: group + description: > + + fields: + - name: hard_conn + type: integer + description: > + + - name: sock + type: integer + description: > + + - name: conn + type: integer + description: > + + - name: ssl_conns + type: integer + description: > + + - name: pipes + type: integer + description: > + + - name: conn_rate + type: integer + description: > + + - name: sess_rate + type: integer + description: > + + - name: ssl_rate + type: integer + description: > + + - name: zlib_mem_usage + type: integer + description: > + + - name: pipes + type: group + description: > + + fields: + - name: used + type: integer + description: > + + - name: free + type: integer + description: > + + + - name: sess + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + + - name: limit + type: integer + description: > + + - name: ssl + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + + - name: limit + type: integer + description: > + + - name: frontend + type: group + description: > + + fields: + - name: key_rate + type: integer + description: > + + - name: max_key_rate + type: integer + description: > + + - name: session_reuse_pct + type: integer + description: > + + - name: backend + type: group + description: > + + fields: + - name: key_rate + type: integer + description: > + + - name: max_key_rate + type: integer + description: > + + - name: ssl_cached_lookups + type: integer + description: > + + - name: ssl_cache_misses + type: integer + description: > + + - name: zlib_mem_usage + type: integer + description: > + + - name: tasks + type: integer + description: > 
+ - name: run_queue + type: integer + description: > + + - name: idle_pct + type: scaled_float + format: percent + description: > diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go new file mode 100644 index 000000000000..97f2d709d20d --- /dev/null +++ b/metricbeat/module/haproxy/info/data.go @@ -0,0 +1,134 @@ +package info + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/module/haproxy" + s "github.com/elastic/beats/metricbeat/schema" + c "github.com/elastic/beats/metricbeat/schema/mapstrstr" + + "reflect" + "strconv" + "strings" +) + +var ( + schema = s.Schema{ + "nb_proc": c.Int("Nbproc"), + "process_num": c.Int("ProcessNum"), + "pid": c.Int("Pid"), + "uptime_sec": c.Int("UptimeSec"), + "mem_max_bytes": c.Int("MemMax"), + "ulimit_n": c.Int("UlimitN"), + + "compress": s.Object{ + "bps": s.Object{ + "in": c.Int("CompressBpsIn"), + "out": c.Int("CompressBpsOut"), + "rate_limit": c.Int("CompressBpsRateLim"), + }, + }, + + "conn": s.Object{ + "rate": s.Object{ + "value": c.Int("ConnRate"), + "limit": c.Int("ConnRateLimit"), + }, + }, + + "curr": s.Object{ + "conns": c.Int("CurrConns"), + "ssl_conns": c.Int("CurrSslConns"), + }, + + "cum": s.Object{ + "conns": c.Int("CumConns"), + "req": c.Int("CumReq"), + "ssl_conns": c.Int("CumSslConns"), + }, + + "max": s.Object{ + "hard_conn": c.Int("HardMaxconn"), + "ssl": s.Object{ + "conns": c.Int("MaxSslConns"), + "rate": c.Int("MaxSslRate"), + }, + "sock": c.Int("Maxsock"), + "conn": s.Object{ + "value": c.Int("Maxconn"), + "rate": c.Int("MaxConnRate"), + }, + "sess_rate": c.Int("MaxSessRate"), + "pipes": c.Int("Maxpipes"), + "zlib_mem_usage": c.Int("MaxZlibMemUsage"), + }, + + "pipes": s.Object{ + "used": c.Int("PipesUsed"), + "free": c.Int("PipesFree"), + }, + + "sess": s.Object{ + "rate": s.Object{ + "value": c.Int("SessRate"), + "limit": c.Int("SessRateLimit"), + }, + }, + + "ssl": s.Object{ + "rate": s.Object{ + "value": c.Int("SslRate"), + "limit": c.Int("SslRateLimit"), + }, + "frontend": s.Object{ + "key_rate": c.Int("SslFrontendKeyRate"), + "max_key_rate": c.Int("SslFrontendMaxKeyRate"), + "session_reuse_pct": c.Int("SslFrontendSessionReusePct"), + }, + "backend": s.Object{ + "key_rate": c.Int("SslBackendKeyRate"), + "max_key_rate": c.Int("SslBackendMaxKeyRate"), + }, + "cached_lookups": c.Int("SslCacheLookups"), + "cache_misses": c.Int("SslCacheMisses"), + }, + + "zlib_mem_usage": c.Int("ZlibMemUsage"), + "tasks": c.Int("Tasks"), + "run_queue": c.Int("RunQueue"), + "idle_pct": c.Float("IdlePct"), + } +) + +// eventMapping maps the haproxy info fields to a common.MapStr +func eventMapping(info *haproxy.Info) (common.MapStr, error) { + // Full mapping from info + + st := reflect.ValueOf(info).Elem() + typeOfT := st.Type() + source := map[string]interface{}{} + + for i := 0; i < st.NumField(); i++ { + f := st.Field(i) + + if typeOfT.Field(i).Name == "IdlePct" { + // Convert this value to a float between 0.0 and 1.0 + fval, err := strconv.ParseFloat(f.Interface().(string), 64) + if err != nil { + return nil, err + } + source[typeOfT.Field(i).Name] = strconv.FormatFloat(fval/float64(100), 'f', 2, 64) + } else if typeOfT.Field(i).Name == "MemMax" { + // Convert the 'Memmax_MB' value to bytes + val, err := strconv.Atoi(strings.TrimSpace(f.Interface().(string))) + if err != nil { + return nil, err + } + source[typeOfT.Field(i).Name] = strconv.Itoa(val * 1024 * 1024) + } else { + source[typeOfT.Field(i).Name] = f.Interface() + } + + } + + return schema.Apply(source), nil +} diff --git 
a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go new file mode 100644 index 000000000000..0801ab78a173 --- /dev/null +++ b/metricbeat/module/haproxy/info/info.go @@ -0,0 +1,86 @@ +package info + +import ( + "fmt" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/haproxy" +) + +const ( + // defaultAddr is the default address of the unix socket for stats on haproxy. + statsMethod = "info" + defaultAddr = "unix:///var/lib/haproxy/stats" +) + +var ( + debugf = logp.MakeDebug("haproxy-info") +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("haproxy", "info", New); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + statsAddr string + counter int +} + +// New creates a new instance of the MetricSet +// Part of New is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + + logp.Warn("EXPERIMENTAL: The haproxy info metricset is experimental") + + config := struct { + StatsAddr string `config:"stats_addr"` + }{ + StatsAddr: defaultAddr, + } + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + statsAddr: config.StatsAddr, + counter: 1, + }, nil +} + +// Fetch implements the data gathering and data conversion to the right format +// It returns the event which is then forwarded to the output. In case of an error, a +// descriptive error must be returned. 
+func (m *MetricSet) Fetch() (common.MapStr, error) { + + hapc, err := haproxy.NewHaproxyClient(m.statsAddr) + if err != nil { + return nil, fmt.Errorf("HAProxy Client error: %s", err) + } + + res, err := hapc.GetInfo() + + if err != nil { + return nil, fmt.Errorf("HAProxy Client error fetching %s: %s", statsMethod, err) + } + m.counter++ + + mappedEvent, err := eventMapping(res) + if err != nil { + return nil, err + } + return mappedEvent, nil + +} diff --git a/metricbeat/module/haproxy/stat/_meta/data.json b/metricbeat/module/haproxy/stat/_meta/data.json new file mode 100644 index 000000000000..ab75ae23ebd7 --- /dev/null +++ b/metricbeat/module/haproxy/stat/_meta/data.json @@ -0,0 +1,61 @@ +{ + "@timestamp": "2016-08-13T19:44:21.036Z", + "beat": { + "hostname": "beathost", + "name": "beathost" + }, + "haproxy": { + "stat": { + "act": 1, + "bck": 0, + "bin": 0, + "bout": 0, + "check_duration": 0, + "check_status": "L4CON", + "chkdown": 1, + "chkfail": 1, + "cli_abrt": 0, + "ctime": 0, + "downtime": 13700, + "dresp": 0, + "econ": 0, + "eresp": 0, + "hanafail": 0, + "hrsp_1xx": 0, + "hrsp_2xx": 0, + "hrsp_3xx": 0, + "hrsp_4xx": 0, + "hrsp_5xx": 0, + "hrsp_other": 0, + "iid": 3, + "last_chk": "Connection refused", + "lastchg": 13700, + "lastsess": -1, + "lbtot": 0, + "pid": 1, + "qcur": 0, + "qmax": 0, + "qtime": 0, + "rate": 0, + "rate_max": 0, + "rtime": 0, + "scur": 0, + "sid": 1, + "smax": 0, + "srv_abrt": 0, + "status": "DOWN", + "stot": 0, + "svname": "log1", + "ttime": 0, + "weight": 1, + "wredis": 0, + "wretr": 0 + } + }, + "metricset": { + "module": "haproxy", + "name": "stat", + "rtt": 2118 + }, + "type": "" +} \ No newline at end of file diff --git a/metricbeat/module/haproxy/stat/_meta/docs.asciidoc b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc new file mode 100644 index 000000000000..52c2085e868f --- /dev/null +++ b/metricbeat/module/haproxy/stat/_meta/docs.asciidoc @@ -0,0 +1,133 @@ +=== haproxy stat MetricSet + +This is the stat metricset of the module haproxy. + + +=== Official documentation for fields + +A list of the 'stat' fields along with a basic description can be found here: + +http://www.haproxy.org/download/1.6/doc/management.txt + +The documentation below is an extract from the URL above, more specifically from section "9.1. CSV format". + +[source] +----------------- +In brackets after each field name are the types which may have a value for +that field. The types are L (Listeners), F (Frontends), B (Backends), and +S (Servers). + + 0. pxname [LFBS]: proxy name + 1. svname [LFBS]: service name (FRONTEND for frontend, BACKEND for backend, + any name for server/listener) + 2. qcur [..BS]: current queued requests. For the backend this reports the + number queued without a server assigned. + 3. qmax [..BS]: max value of qcur + 4. scur [LFBS]: current sessions + 5. smax [LFBS]: max sessions + 6. slim [LFBS]: configured session limit + 7. stot [LFBS]: cumulative number of connections + 8. bin [LFBS]: bytes in + 9. bout [LFBS]: bytes out + 10. dreq [LFB.]: requests denied because of security concerns. + - For tcp this is because of a matched tcp-request content rule. + - For http this is because of a matched http-request or tarpit rule. + 11. dresp [LFBS]: responses denied because of security concerns. + - For http this is because of a matched http-request rule, or + "option checkcache". + 12. ereq [LF..]: request errors. Some of the possible causes are: + - early termination from the client, before the request has been sent. 
+ - read error from the client + - client timeout + - client closed connection + - various bad requests from the client. + - request was tarpitted. + 13. econ [..BS]: number of requests that encountered an error trying to + connect to a backend server. The backend stat is the sum of the stat + for all servers of that backend, plus any connection errors not + associated with a particular server (such as the backend having no + active servers). + 14. eresp [..BS]: response errors. srv_abrt will be counted here also. + Some other errors are: + - write error on the client socket (won't be counted for the server stat) + - failure applying filters to the response. + 15. wretr [..BS]: number of times a connection to a server was retried. + 16. wredis [..BS]: number of times a request was redispatched to another + server. The server value counts the number of times that server was + switched away from. + 17. status [LFBS]: status (UP/DOWN/NOLB/MAINT/MAINT(via)...) + 18. weight [..BS]: total weight (backend), server weight (server) + 19. act [..BS]: number of active servers (backend), server is active (server) + 20. bck [..BS]: number of backup servers (backend), server is backup (server) + 21. chkfail [...S]: number of failed checks. (Only counts checks failed when + the server is up.) + 22. chkdown [..BS]: number of UP->DOWN transitions. The backend counter counts + transitions to the whole backend being down, rather than the sum of the + counters for each server. + 23. lastchg [..BS]: number of seconds since the last UP<->DOWN transition + 24. downtime [..BS]: total downtime (in seconds). The value for the backend + is the downtime for the whole backend, not the sum of the server downtime. + 25. qlimit [...S]: configured maxqueue for the server, or nothing in the + value is 0 (default, meaning no limit) + 26. pid [LFBS]: process id (0 for first instance, 1 for second, ...) + 27. iid [LFBS]: unique proxy id + 28. sid [L..S]: server id (unique inside a proxy) + 29. throttle [...S]: current throttle percentage for the server, when + slowstart is active, or no value if not in slowstart. + 30. lbtot [..BS]: total number of times a server was selected, either for new + sessions, or when re-dispatching. The server counter is the number + of times that server was selected. + 31. tracked [...S]: id of proxy/server if tracking is enabled. + 32. type [LFBS]: (0=frontend, 1=backend, 2=server, 3=socket/listener) + 33. rate [.FBS]: number of sessions per second over last elapsed second + 34. rate_lim [.F..]: configured limit on new sessions per second + 35. rate_max [.FBS]: max number of new sessions per second + 36. check_status [...S]: status of last health check, one of: + UNK -> unknown + INI -> initializing + SOCKERR -> socket error + L4OK -> check passed on layer 4, no upper layers testing enabled + L4TOUT -> layer 1-4 timeout + L4CON -> layer 1-4 connection problem, for example + "Connection refused" (tcp rst) or "No route to host" (icmp) + L6OK -> check passed on layer 6 + L6TOUT -> layer 6 (SSL) timeout + L6RSP -> layer 6 invalid response - protocol error + L7OK -> check passed on layer 7 + L7OKC -> check conditionally passed on layer 7, for example 404 with + disable-on-404 + L7TOUT -> layer 7 (HTTP/SMTP) timeout + L7RSP -> layer 7 invalid response - protocol error + L7STS -> layer 7 response error, for example HTTP 5xx + 37. check_code [...S]: layer5-7 code, if available + 38. check_duration [...S]: time in ms took to finish last health check + 39. 
hrsp_1xx [.FBS]: http responses with 1xx code + 40. hrsp_2xx [.FBS]: http responses with 2xx code + 41. hrsp_3xx [.FBS]: http responses with 3xx code + 42. hrsp_4xx [.FBS]: http responses with 4xx code + 43. hrsp_5xx [.FBS]: http responses with 5xx code + 44. hrsp_other [.FBS]: http responses with other codes (protocol error) + 45. hanafail [...S]: failed health checks details + 46. req_rate [.F..]: HTTP requests per second over last elapsed second + 47. req_rate_max [.F..]: max number of HTTP requests per second observed + 48. req_tot [.F..]: total number of HTTP requests received + 49. cli_abrt [..BS]: number of data transfers aborted by the client + 50. srv_abrt [..BS]: number of data transfers aborted by the server + (inc. in eresp) + 51. comp_in [.FB.]: number of HTTP response bytes fed to the compressor + 52. comp_out [.FB.]: number of HTTP response bytes emitted by the compressor + 53. comp_byp [.FB.]: number of bytes that bypassed the HTTP compressor + (CPU/BW limit) + 54. comp_rsp [.FB.]: number of HTTP responses that were compressed + 55. lastsess [..BS]: number of seconds since last session assigned to + server/backend + 56. last_chk [...S]: last health check contents or textual error + 57. last_agt [...S]: last agent check contents or textual error + 58. qtime [..BS]: the average queue time in ms over the 1024 last requests + 59. ctime [..BS]: the average connect time in ms over the 1024 last requests + 60. rtime [..BS]: the average response time in ms over the 1024 last requests + (0 for TCP) + 61. ttime [..BS]: the average total session time in ms over the 1024 last + requests + +----------------- \ No newline at end of file diff --git a/metricbeat/module/haproxy/stat/_meta/fields.yml b/metricbeat/module/haproxy/stat/_meta/fields.yml new file mode 100644 index 000000000000..c309b879ce33 --- /dev/null +++ b/metricbeat/module/haproxy/stat/_meta/fields.yml @@ -0,0 +1,393 @@ +- name: stat + type: group + description: > + Stats collected from HAProxy process + fields: + - name: pxname + type: string + description: > + proxy name + + - name: svname + type: string + description: > + service name (FRONTEND for frontend, BACKEND for backend, any name for server/listener) + + - name: qcur + type: integer + description: > + current queued requests. For the backend this reports the number queued without a server assigned + + - name: qmax + type: integer + description: > + max value of qcur + + - name: scur + type: integer + description: > + current sessions + + - name: smax + type: integer + description: > + max sessions + + - name: slim + type: integer + description: > + configured session limit + + - name: stot + type: string + description: > + cumulative number of connections + + - name: bin + type: integer + description: > + bytes in + + - name: bout + type: integer + description: > + bytes out + + - name: dreq + type: integer + description: > + requests denied because of security concerns. + * For tcp this is because of a matched tcp-request content rule. + * For http this is because of a matched http-request or tarpit rule. + + - name: dresp + type: integer + description: > + responses denied because of security concerns. + * For http this is because of a matched http-request rule, or "option checkcache". + + - name: ereq + type: integer + description: > + request errors. Some of the possible causes are: + * early termination from the client, before the request has been sent. 
+ * read error from the client + * client timeout + * client closed connection + * various bad requests from the client. + * request was tarpitted. + + - name: econ + type: integer + description: > + number of requests that encountered an error trying to + connect to a backend server. The backend stat is the sum of the stat + for all servers of that backend, plus any connection errors not + associated with a particular server (such as the backend having no + active servers). + + - name: eresp + type: integer + description: > + response errors. srv_abrt will be counted here also. + Some other errors are: + * write error on the client socket (won't be counted for the server stat) + * failure applying filters to the response. + + - name: wretr + type: integer + description: > + number of times a connection to a server was retried. + + - name: wredis + type: integer + description: > + number of times a request was redispatched to another + server. The server value counts the number of times that server was + switched away from. + + - name: status + type: string + description: > + status (UP/DOWN/NOLB/MAINT/MAINT(via)...) + + - name: weight + type: integer + description: > + total weight (backend), server weight (server) + + - name: act + type: integer + description: > + number of active servers (backend), server is active (server) + + - name: bck + type: integer + description: > + number of backup servers (backend), server is backup (server) + + - name: chkfail + type: integer + description: > + number of failed checks. (Only counts checks failed when + the server is up.) + + - name: chkdown + type: integer + description: > + number of UP->DOWN transitions. The backend counter counts + transitions to the whole backend being down, rather than the sum of the + counters for each server. + + - name: lastchg + type: integer + description: > + number of seconds since the last UP<->DOWN transition + + - name: downtime + type: integer + description: > + total downtime (in seconds). The value for the backend + is the downtime for the whole backend, not the sum of the server downtime. + + - name: qlimit + type: integer + description: > + configured maxqueue for the server, or nothing in the + value is 0 (default, meaning no limit) + + - name: pid + type: integer + description: > + process id (0 for first instance, 1 for second, ...) + + - name: iid + type: integer + description: > + unique proxy id + + - name: sid + type: integer + description: > + server id (unique inside a proxy) + + - name: throttle + type: integer + description: > + current throttle percentage for the server, when + slowstart is active, or no value if not in slowstart. + + - name: lbtot + type: integer + description: > + total number of times a server was selected, either for new + sessions, or when re-dispatching. The server counter is the number + of times that server was selected. + + - name: tracked + type: integer + description: > + id of proxy/server if tracking is enabled. 
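+
+    # Note: the remaining HAProxy CSV columns are not exported one-to-one. The
+    # `type` column is exported as `component_type`, and column families such
+    # as rate_*, check_*, hrsp_*, req_*, comp_* and last* are regrouped into
+    # the nested objects defined below.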
+ + - name: component_type + type: integer + description: > + (0=frontend, 1=backend, 2=server, 3=socket/listener) + + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + number of sessions per second over last elapsed second + + - name: lim + type: integer + description: > + configured limit on new sessions per second + + - name: max + type: integer + description: > + max number of new sessions per second + + + - name: check + type: group + description: > + + fields: + - name: status + type: string + description: > + status of last health check, one of: + UNK -> unknown + INI -> initializing + SOCKERR -> socket error + L4OK -> check passed on layer 4, no upper layers testing enabled + L4TOUT -> layer 1-4 timeout + L4CON -> layer 1-4 connection problem, for example + "Connection refused" (tcp rst) or "No route to host" (icmp) + L6OK -> check passed on layer 6 + L6TOUT -> layer 6 (SSL) timeout + L6RSP -> layer 6 invalid response - protocol error + L7OK -> check passed on layer 7 + L7OKC -> check conditionally passed on layer 7, for example 404 with + disable-on-404 + L7TOUT -> layer 7 (HTTP/SMTP) timeout + L7RSP -> layer 7 invalid response - protocol error + L7STS -> layer 7 response error, for example HTTP 5xx + + - name: code + type: integer + description: > + layer5-7 code, if available + + - name: duration + type: integer + description: > + time in ms took to finish last health check + + - name: hrsp + type: group + description: > + + fields: + - name: 1xx + type: integer + description: > + http responses with 1xx code + + - name: 2xx + type: integer + description: > + http responses with 2xx code + + - name: 3xx + type: integer + description: > + http responses with 3xx code + + - name: 4xx + type: integer + description: > + http responses with 4xx code + + - name: 5xx + type: integer + description: > + http responses with 5xx code + + - name: other + type: integer + description: > + http responses with other codes (protocol error) + + - name: hanafail + type: integer + description: > + failed health checks details + + - name: req + type: group + description: > + + fields: + - name: rate + type: group + description: > + + fields: + - name: value + type: integer + description: > + HTTP requests per second over last elapsed second + + - name: max + type: integer + description: > + max number of HTTP requests per second observed + + - name: tot + type: integer + description: > + total number of HTTP requests received + + - name: cli_abrt + type: integer + description: > + number of data transfers aborted by the client + + - name: srv_abrt + type: integer + description: > + number of data transfers aborted by the server (inc. 
in eresp) + + - name: comp + type: group + description: > + + fields: + - name: in + type: integer + description: > + number of HTTP response bytes fed to the compressor + + - name: out + type: integer + description: > + number of HTTP response bytes emitted by the compressor + + - name: byp + type: integer + description: > + number of bytes that bypassed the HTTP compressor (CPU/BW limit) + + - name: rsp + type: integer + description: > + number of HTTP responses that were compressed + + - name: last + type: group + description: > + + fields: + - name: sess + type: integer + description: > + number of seconds since last session assigned to server/backend + + - name: chk + type: string + description: > + last health check contents or textual error + + - name: agt + type: string + description: > + last agent check contents or textual error + + + - name: qtime + type: integer + description: > + the average queue time in ms over the 1024 last requests + + - name: ctime + type: integer + description: > + the average connect time in ms over the 1024 last requests + + - name: rtime + type: integer + description: > + the average response time in ms over the 1024 last requests (0 for TCP) + + - name: ttime + type: integer + description: > + the average total session time in ms over the 1024 last requests \ No newline at end of file diff --git a/metricbeat/module/haproxy/stat/data.go b/metricbeat/module/haproxy/stat/data.go new file mode 100644 index 000000000000..ae4bb7da2c55 --- /dev/null +++ b/metricbeat/module/haproxy/stat/data.go @@ -0,0 +1,160 @@ +package stat + +import ( + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/module/haproxy" + s "github.com/elastic/beats/metricbeat/schema" + c "github.com/elastic/beats/metricbeat/schema/mapstrstr" + "reflect" + "strings" +) + +var ( + schema = s.Schema{ + "pxname": c.Str("PxName"), + "svname": c.Str("SvName"), + "qcur": c.Int("Qcur"), + "qmax": c.Int("Qmax"), + "scur": c.Int("Scur"), + "smax": c.Int("Smax"), + "slim": c.Int("Slim"), + "stot": c.Int("Stot"), + "bin": c.Int("Bin"), + "bout": c.Int("Bout"), + "dreq": c.Int("Dreq"), + "dresp": c.Int("Dresp"), + "ereq": c.Int("Ereq"), + "econ": c.Int("Econ"), + "eresp": c.Int("Eresp"), + "wretr": c.Int("Wretr"), + "wredis": c.Int("Wredis"), + "status": c.Str("Status"), + "weight": c.Int("Weight"), + "act": c.Int("Act"), + "bck": c.Int("Bck"), + "chkfail": c.Int("ChkFail"), + "chkdown": c.Int("ChkDown"), + "lastchg": c.Int("Lastchg"), + "downtime": c.Int("Downtime"), + "qlimit": c.Int("Qlimit"), + "pid": c.Int("Pid"), + "iid": c.Int("Iid"), + "sid": c.Int("Sid"), + "throttle": c.Int("Throttle"), + "lbtot": c.Int("Lbtot"), + "tracked": c.Int("Tracked"), + "component_type": c.Int("Type"), + + "rate": s.Object{ + "value": c.Int("Rate"), + "lim": c.Int("RateLim"), + "max": c.Int("RateMax"), + }, + + "check": s.Object{ + "status": c.Str("CheckStatus"), + "code": c.Int("CheckCode"), + "duration": c.Int("CheckDuration"), + }, + + "hrsp": s.Object{ + "1xx": c.Int("Hrsp1xx"), + "2xx": c.Int("Hrsp2xx"), + "3xx": c.Int("Hrsp3xx"), + "4xx": c.Int("Hrsp4xx"), + "5xx": c.Int("Hrsp5xx"), + "other": c.Int("HrspOther"), + }, + + "hanafail": c.Int("Hanafail"), + + "req": s.Object{ + "rate": s.Object{ + "value": c.Int("ReqRate"), + "max": c.Int("ReqRateMax"), + }, + "tot": c.Int("ReqTot"), + }, + + "cli_abrt": c.Int("CliAbrt"), + "srv_abrt": c.Int("SrvAbrt"), + + "comp": s.Object{ + "in": c.Int("CompIn"), + "out": c.Int("CompOut"), + "byp": c.Int("CompByp"), + "rsp": c.Int("CompRsp"), + }, 
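+
+		// Each c.Int/c.Str entry in this schema converts one HAProxy CSV
+		// column, keyed by the corresponding field name of the haproxy.Stat
+		// struct (see eventMapping below); nested s.Object entries regroup
+		// related columns under a single key.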
+ + "last": s.Object{ + "sess": c.Int("LastSess"), + "chk": c.Str("LastChk"), + "agt": c.Str("LastAgt"), + }, + + "qtime": c.Int("Qtime"), + "ctime": c.Int("Ctime"), + "rtime": c.Int("Rtime"), + "ttime": c.Int("Ttime"), + } +) + +func parseResponse(data []byte) []map[string]string { + + var results []map[string]string + + str := string(data) + fieldNames := []string{} + + for lnNum, ln := range strings.Split(str, "\n") { + + // If the line by any chance is empty, then skip it + ln := strings.Trim(ln, " ") + if ln == "" { + continue + } + + // Now split the line on each comma and if there isn + ln = strings.Trim(ln, ",") + parts := strings.Split(strings.Trim(ln, " "), ",") + if len(parts) != 62 { + continue + } + + // For the first row, keep the column names and continue + if lnNum == 0 { + fieldNames = parts + continue + } + + res := map[string]string{} + for i, v := range parts { + res[fieldNames[i]] = v + } + + results = append(results, res) + + } + return results +} + +// Map data to MapStr +func eventMapping(info []*haproxy.Stat) []common.MapStr { + + var events []common.MapStr + + for _, evt := range info { + st := reflect.ValueOf(evt).Elem() + typeOfT := st.Type() + source := map[string]interface{}{} + + for i := 0; i < st.NumField(); i++ { + f := st.Field(i) + source[typeOfT.Field(i).Name] = f.Interface() + + } + events = append(events, schema.Apply(source)) + } + + return events +} diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go new file mode 100644 index 000000000000..d350a7eec2d5 --- /dev/null +++ b/metricbeat/module/haproxy/stat/stat.go @@ -0,0 +1,82 @@ +package stat + +import ( + "fmt" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/haproxy" +) + +const ( + // defaultSocket is the default path to the unix socket for stats on haproxy. + statsMethod = "stat" + defaultAddr = "unix:///var/lib/haproxy/stats" +) + +var ( + debugf = logp.MakeDebug("haproxy-stat") +) + +// init registers the MetricSet with the central registry. +// The New method will be called after the setup of the module and before starting to fetch data +func init() { + if err := mb.Registry.AddMetricSet("haproxy", statsMethod, New); err != nil { + panic(err) + } +} + +// MetricSet type defines all fields of the MetricSet +// As a minimum it must inherit the mb.BaseMetricSet fields, but can be extended with +// additional entries. These variables can be used to persist data or configuration between +// multiple fetch calls. +type MetricSet struct { + mb.BaseMetricSet + statsAddr string + counter int +} + +// New create a new instance of the MetricSet +// Part of new is also setting up the configuration by processing additional +// configuration entries if needed. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + + logp.Warn("EXPERIMENTAL: The haproxy stat metricset is experimental") + + config := struct { + StatsAddr string `config:"stats_addr"` + }{ + StatsAddr: defaultAddr, + } + + if err := base.Module().UnpackConfig(&config); err != nil { + return nil, err + } + + return &MetricSet{ + BaseMetricSet: base, + statsAddr: config.StatsAddr, + counter: 1, + }, nil +} + +// Fetch methods implements the data gathering and data conversion to the right format +// It returns the event which is then forward to the output. In case of an error, a +// descriptive error must be returned. 
+func (m *MetricSet) Fetch() ([]common.MapStr, error) { + + hapc, err := haproxy.NewHaproxyClient(m.statsAddr) + if err != nil { + return nil, fmt.Errorf("HAProxy Client error: %s", err) + } + + res, err := hapc.GetStat() + + if err != nil { + return nil, fmt.Errorf("HAProxy Client error fetching %s: %s", statsMethod, err) + } + m.counter++ + + return eventMapping(res), nil + +} diff --git a/vendor/github.com/gocarina/gocsv/LICENSE b/vendor/github.com/gocarina/gocsv/LICENSE new file mode 100644 index 000000000000..052a371193e4 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jonathan Picques + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gocarina/gocsv/README.md b/vendor/github.com/gocarina/gocsv/README.md new file mode 100644 index 000000000000..69aabca636c1 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/README.md @@ -0,0 +1,151 @@ +Go CSV +===== + +The GoCSV package aims to provide easy serialization and deserialization functions to use CSV in Golang + +API and techniques inspired from https://godoc.org/gopkg.in/mgo.v2 + +[![GoDoc](https://godoc.org/github.com/gocarina/gocsv?status.png)](https://godoc.org/github.com/gocarina/gocsv) +[![Build Status](https://travis-ci.org/gocarina/gocsv.svg?branch=master)](https://travis-ci.org/gocarina/gocsv) + +Installation +===== + +```go get -u github.com/gocarina/gocsv``` + +Full example +===== + +Consider the following CSV file + +```csv + +client_id,client_name,client_age +1,Jose,42 +2,Daniel,26 +3,Vincent,32 + +``` + +Easy binding in Go! 
+--- + +```go + +package main + +import ( + "fmt" + "gocsv" + "os" +) + +type Client struct { // Our example struct, you can use "-" to ignore a field + Id string `csv:"id"` + Name string `csv:"name"` + Age string `csv:"age"` + NotUsed string `csv:"-"` +} + +func main() { + clientsFile, err := os.OpenFile("clients.csv", os.O_RDWR|os.O_CREATE, os.ModePerm) + if err != nil { + panic(err) + } + defer clientsFile.Close() + + clients := []*Client{} + + if err := gocsv.UnmarshalFile(clientsFile, &clients); err != nil { // Load clients from file + panic(err) + } + for _, client := range clients { + fmt.Println("Hello", client.Name) + } + + if _, err := clientsFile.Seek(0, 0); err != nil { // Go to the start of the file + panic(err) + } + + clients = append(clients, &Client{Id: "12", Name: "John", Age: "21"}) // Add clients + clients = append(clients, &Client{Id: "13", Name: "Fred"}) + clients = append(clients, &Client{Id: "14", Name: "James", Age: "32"}) + clients = append(clients, &Client{Id: "15", Name: "Danny"}) + csvContent, err := gocsv.MarshalString(&clients) // Get all clients as CSV string + //err = gocsv.MarshalFile(&clients, clientsFile) // Use this to save the CSV back to the file + if err != nil { + panic(err) + } + fmt.Println(csvContent) // Display all clients as CSV string + +} + +``` + +Customizable Converters +--- + +```go + +type DateTime struct { + time.Time +} + +// Convert the internal date as CSV string +func (date *DateTime) MarshalCSV() (string, error) { + return date.Time.Format("20060201"), nil +} + +// You could also use the standard Stringer interface +func (date *DateTime) String() (string) { + return date.String() // Redundant, just for example +} + +// Convert the CSV string as internal date +func (date *DateTime) UnmarshalCSV(csv string) (err error) { + date.Time, err = time.Parse("20060201", csv) + if err != nil { + return err + } + return nil +} + +type Client struct { // Our example struct with a custom type (DateTime) + Id string `csv:"id"` + Name string `csv:"name"` + Employed DateTime `csv:"employed"` +} + +``` + +Customizable CSV Reader / Writer +--- + +```go + +func main() { + ... + + gocsv.SetCSVReader(func(in io.Reader) *csv.Reader { + //return csv.NewReader(in) + return gocsv.LazyCSVReader(in) // Allows use of quotes in CSV + }) + + ... + + gocsv.UnmarshalFile(file, &clients) + + ... + + gocsv.SetCSVWriter(func(out io.Writer) *csv.Writer { + return csv.NewWriter(out) + }) + + ... + + gocsv.MarshalFile(&clients, file) + + ... +} + +``` diff --git a/vendor/github.com/gocarina/gocsv/csv.go b/vendor/github.com/gocarina/gocsv/csv.go new file mode 100644 index 000000000000..7725ab1cb861 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/csv.go @@ -0,0 +1,199 @@ +// Copyright 2014 Jonathan Picques. All rights reserved. +// Use of this source code is governed by a MIT license +// The license can be found in the LICENSE file. + +// The GoCSV package aims to provide easy CSV serialization and deserialization to the golang programming language + +package gocsv + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "os" + "reflect" + "strings" +) + +// FailIfUnmatchedStructTags indicates whether it is considered an error when there is an unmatched +// struct tag. +var FailIfUnmatchedStructTags = false + +// FailIfDoubleHeaderNames indicates whether it is considered an error when a header name is repeated +// in the csv header. 
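+// When it is false, a repeated header is accepted and the value taken from the
+// last matching column silently wins.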
+var FailIfDoubleHeaderNames = false + +// TagSeparator defines seperator string for multiple csv tags in struct fields +var TagSeparator = "," + +// -------------------------------------------------------------------------- +// CSVWriter used to format CSV + +var selfCSVWriter = DefaultCSVWriter + +// DefaultCSVWriter is the default CSV writer used to format CSV (cf. csv.NewWriter) +func DefaultCSVWriter(out io.Writer) *csv.Writer { + return csv.NewWriter(out) +} + +// SetCSVWriter sets the CSV writer used to format CSV. +func SetCSVWriter(csvWriter func(io.Writer) *csv.Writer) { + selfCSVWriter = csvWriter +} + +func getCSVWriter(out io.Writer) *csv.Writer { + return selfCSVWriter(out) +} + +// -------------------------------------------------------------------------- +// CSVReader used to parse CSV + +var selfCSVReader = DefaultCSVReader + +// DefaultCSVReader is the default CSV reader used to parse CSV (cf. csv.NewReader) +func DefaultCSVReader(in io.Reader) *csv.Reader { + return csv.NewReader(in) +} + +// LazyCSVReader returns a lazy CSV reader, with LazyQuotes and TrimLeadingSpace. +func LazyCSVReader(in io.Reader) *csv.Reader { + csvReader := csv.NewReader(in) + csvReader.LazyQuotes = true + csvReader.TrimLeadingSpace = true + return csvReader +} + +// SetCSVReader sets the CSV reader used to parse CSV. +func SetCSVReader(csvReader func(io.Reader) *csv.Reader) { + selfCSVReader = csvReader +} + +func getCSVReader(in io.Reader) *csv.Reader { + return selfCSVReader(in) +} + +// -------------------------------------------------------------------------- +// Marshal functions + +// MarshalFile saves the interface as CSV in the file. +func MarshalFile(in interface{}, file *os.File) (err error) { + return Marshal(in, file) +} + +// MarshalString returns the CSV string from the interface. +func MarshalString(in interface{}) (out string, err error) { + bufferString := bytes.NewBufferString(out) + if err := Marshal(in, bufferString); err != nil { + return "", err + } + return bufferString.String(), nil +} + +// MarshalBytes returns the CSV bytes from the interface. +func MarshalBytes(in interface{}) (out []byte, err error) { + bufferString := bytes.NewBuffer(out) + if err := Marshal(in, bufferString); err != nil { + return nil, err + } + return bufferString.Bytes(), nil +} + +// Marshal returns the CSV in writer from the interface. +func Marshal(in interface{}, out io.Writer) (err error) { + writer := getCSVWriter(out) + return writeTo(writer, in) +} + +// MarshalChan returns the CSV read from the channel. +func MarshalChan(c <-chan interface{}, out *csv.Writer) error { + return writeFromChan(out, c) +} + +// MarshalCSV returns the CSV in writer from the interface. +func MarshalCSV(in interface{}, out *csv.Writer) (err error) { + return writeTo(out, in) +} + +// -------------------------------------------------------------------------- +// Unmarshal functions + +// UnmarshalFile parses the CSV from the file in the interface. +func UnmarshalFile(in *os.File, out interface{}) (err error) { + return Unmarshal(in, out) +} + +// UnmarshalString parses the CSV from the string in the interface. +func UnmarshalString(in string, out interface{}) (err error) { + return Unmarshal(strings.NewReader(in), out) +} + +// UnmarshalBytes parses the CSV from the bytes in the interface. +func UnmarshalBytes(in []byte, out interface{}) (err error) { + return Unmarshal(bytes.NewReader(in), out) +} + +// Unmarshal parses the CSV from the reader in the interface. 
+func Unmarshal(in io.Reader, out interface{}) (err error) { + return readTo(newDecoder(in), out) +} + +// UnmarshalCSV parses the CSV from the reader in the interface. +func UnmarshalCSV(in *csv.Reader, out interface{}) error { + return readTo(csvDecoder{in}, out) +} + +// UnmarshalToChan parses the CSV from the reader and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalToChan(in io.Reader, c interface{}) (err error) { + return readEach(newDecoder(in), c) +} + +// UnmarshalStringToChan parses the CSV from the string and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalStringToChan(in string, c interface{}) (err error) { + return UnmarshalToChan(strings.NewReader(in), c) +} + +// UnmarshalBytesToChan parses the CSV from the bytes and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalBytesToChan(in []byte, c interface{}) (err error) { + return UnmarshalToChan(bytes.NewReader(in), c) +} + +// UnmarshalToCallback parses the CSV from the reader and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalToCallback(in io.Reader, f interface{}) (err error) { + valueFunc := reflect.ValueOf(f) + t := reflect.TypeOf(f) + if t.NumIn() != 1 { + return fmt.Errorf("the given function must have exactly one parameter") + } + c := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0) + go func() { + err = UnmarshalToChan(in, c.Interface()) + }() + for { + if err != nil { + return err + } + v, notClosed := c.Recv() + if !notClosed || v.Interface() == nil { + break + } + valueFunc.Call([]reflect.Value{v}) + } + return +} + +// UnmarshalBytesToCallback parses the CSV from the bytes and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalBytesToCallback(in []byte, f interface{}) (err error) { + return UnmarshalToCallback(bytes.NewReader(in), f) +} + +// UnmarshalStringToCallback parses the CSV from the string and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalStringToCallback(in string, c interface{}) (err error) { + return UnmarshalToCallback(strings.NewReader(in), c) +} diff --git a/vendor/github.com/gocarina/gocsv/decode.go b/vendor/github.com/gocarina/gocsv/decode.go new file mode 100644 index 000000000000..c1abcb28133d --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/decode.go @@ -0,0 +1,274 @@ +package gocsv + +import ( + "encoding/csv" + "errors" + "fmt" + "io" + "reflect" +) + +// Decoder . +type Decoder interface { + getCSVRows() ([][]string, error) +} + +// SimpleDecoder . 
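+// It reads a single CSV record at a time, which is what lets readEach stream
+// rows into a channel instead of loading the whole document at once.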
+type SimpleDecoder interface { + getCSVRow() ([]string, error) +} + +type decoder struct { + in io.Reader + csvDecoder *csvDecoder +} + +func newDecoder(in io.Reader) *decoder { + return &decoder{in: in} +} + +func (decode *decoder) getCSVRows() ([][]string, error) { + return getCSVReader(decode.in).ReadAll() +} + +func (decode *decoder) getCSVRow() ([]string, error) { + if decode.csvDecoder == nil { + decode.csvDecoder = &csvDecoder{getCSVReader(decode.in)} + } + return decode.csvDecoder.Read() +} + +type csvDecoder struct { + *csv.Reader +} + +func (c csvDecoder) getCSVRows() ([][]string, error) { + return c.ReadAll() +} + +func (c csvDecoder) getCSVRow() ([]string, error) { + return c.Read() +} + +func maybeMissingStructFields(structInfo []fieldInfo, headers []string) error { + if len(structInfo) == 0 { + return nil + } + + headerMap := make(map[string]struct{}, len(headers)) + for idx := range headers { + headerMap[headers[idx]] = struct{}{} + } + + for _, info := range structInfo { + found := false + for _, key := range info.keys { + if _, ok := headerMap[key]; ok { + found = true + break + } + } + if !found { + return fmt.Errorf("found unmatched struct field with tags %v", info.keys) + } + } + return nil +} + +// Check that no header name is repeated twice +func maybeDoubleHeaderNames(headers []string) error { + headerMap := make(map[string]bool, len(headers)) + for _, v := range headers { + if _, ok := headerMap[v]; ok { + return fmt.Errorf("Repeated header name: %v", v) + } + headerMap[v] = true + } + return nil +} + +func readTo(decoder Decoder, out interface{}) error { + outValue, outType := getConcreteReflectValueAndType(out) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + outInnerWasPointer, outInnerType := getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + return err + } + csvRows, err := decoder.getCSVRows() // Get the CSV csvRows + if err != nil { + return err + } + if len(csvRows) == 0 { + return errors.New("empty csv file given") + } + if err := ensureOutCapacity(&outValue, len(csvRows)); err != nil { // Ensure the container is big enough to hold the CSV content + return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct tags found") + } + + headers := csvRows[0] + body := csvRows[1:] + + csvHeadersLabels := make(map[int]*fieldInfo, len(outInnerStructInfo.Fields)) // Used to store the correspondance header <-> position in CSV + + for i, csvColumnHeader := range headers { + if fieldInfo := getCSVFieldPosition(csvColumnHeader, outInnerStructInfo); fieldInfo != nil { + csvHeadersLabels[i] = fieldInfo + } + } + + if FailIfUnmatchedStructTags { + if err := maybeMissingStructFields(outInnerStructInfo.Fields, headers); err != nil { + return err + } + } + if FailIfDoubleHeaderNames { + if err := maybeDoubleHeaderNames(headers); err != nil { + return err + } + } + + for i, csvRow := range body { + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range csvRow { + if fieldInfo, ok := csvHeadersLabels[j]; ok { // Position found accordingly to header name + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i 
+ 2, //add 2 to account for the header & 0-indexing of arrays + Column: j + 1, + Err: err, + } + } + } + } + outValue.Index(i).Set(outInner) + } + return nil +} + +func readEach(decoder SimpleDecoder, c interface{}) error { + headers, err := decoder.getCSVRow() + if err != nil { + return err + } + outValue, outType := getConcreteReflectValueAndType(c) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + defer outValue.Close() + outInnerWasPointer, outInnerType := getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct tags found") + } + csvHeadersLabels := make(map[int]*fieldInfo, len(outInnerStructInfo.Fields)) // Used to store the correspondance header <-> position in CSV + for i, csvColumnHeader := range headers { + if fieldInfo := getCSVFieldPosition(csvColumnHeader, outInnerStructInfo); fieldInfo != nil { + csvHeadersLabels[i] = fieldInfo + } + } + if err := maybeMissingStructFields(outInnerStructInfo.Fields, headers); err != nil { + if FailIfUnmatchedStructTags { + return err + } + } + if FailIfDoubleHeaderNames { + if err := maybeDoubleHeaderNames(headers); err != nil { + return err + } + } + i := 0 + for { + line, err := decoder.getCSVRow() + if err == io.EOF { + break + } else if err != nil { + return err + } + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range line { + if fieldInfo, ok := csvHeadersLabels[j]; ok { // Position found accordingly to header name + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i + 2, //add 2 to account for the header & 0-indexing of arrays + Column: j + 1, + Err: err, + } + } + } + } + outValue.Send(outInner) + i++ + } + return nil +} + +// Check if the outType is an array or a slice +func ensureOutType(outType reflect.Type) error { + switch outType.Kind() { + case reflect.Slice: + fallthrough + case reflect.Chan: + fallthrough + case reflect.Array: + return nil + } + return fmt.Errorf("cannot use " + outType.String() + ", only slice or array supported") +} + +// Check if the outInnerType is of type struct +func ensureOutInnerType(outInnerType reflect.Type) error { + switch outInnerType.Kind() { + case reflect.Struct: + return nil + } + return fmt.Errorf("cannot use " + outInnerType.String() + ", only struct supported") +} + +func ensureOutCapacity(out *reflect.Value, csvLen int) error { + switch out.Kind() { + case reflect.Array: + if out.Len() < csvLen-1 { // Array is not big enough to hold the CSV content (arrays are not addressable) + return fmt.Errorf("array capacity problem: cannot store %d %s in %s", csvLen-1, out.Type().Elem().String(), out.Type().String()) + } + case reflect.Slice: + if !out.CanAddr() && out.Len() < csvLen-1 { // Slice is not big enough tho hold the CSV content and is not addressable + return fmt.Errorf("slice capacity problem and is not addressable (did you forget &?)") + } else if out.CanAddr() && out.Len() < csvLen-1 { + out.Set(reflect.MakeSlice(out.Type(), csvLen-1, csvLen-1)) // Slice is not big enough, so grows it + } + } + return nil +} + +func getCSVFieldPosition(key string, structInfo 
*structInfo) *fieldInfo { + for _, field := range structInfo.Fields { + if field.matchesKey(key) { + return &field + } + } + return nil +} + +func createNewOutInner(outInnerWasPointer bool, outInnerType reflect.Type) reflect.Value { + if outInnerWasPointer { + return reflect.New(outInnerType) + } + return reflect.New(outInnerType).Elem() +} + +func setInnerField(outInner *reflect.Value, outInnerWasPointer bool, index []int, value string) error { + oi := *outInner + if outInnerWasPointer { + oi = outInner.Elem() + } + return setField(oi.FieldByIndex(index), value) +} diff --git a/vendor/github.com/gocarina/gocsv/decode_test.go b/vendor/github.com/gocarina/gocsv/decode_test.go new file mode 100644 index 000000000000..baafd8792a55 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/decode_test.go @@ -0,0 +1,415 @@ +package gocsv + +import ( + "bytes" + "encoding/csv" + "io" + "strconv" + "strings" + "testing" +) + +func Test_readTo(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +f,1,baz +e,3,b`) + d := &decoder{in: b} + + var samples []Sample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := Sample{Foo: "f", Bar: 1, Baz: "baz"} + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = Sample{Foo: "e", Bar: 3, Baz: "b"} + if expected != samples[1] { + t.Fatalf("expected second sample %v, got %v", expected, samples[1]) + } + + b = bytes.NewBufferString(`foo,BAR,Baz +f,1,baz +e,BAD_INPUT,b`) + d = &decoder{in: b} + samples = []Sample{} + err := readTo(d, &samples) + if err == nil { + t.Fatalf("Expected error from bad input, got: %+v", samples) + } + switch actualErr := err.(type) { + case *csv.ParseError: + if actualErr.Line != 3 { + t.Fatalf("Expected csv.ParseError on line 3, got: %d", actualErr.Line) + } + if actualErr.Column != 2 { + t.Fatalf("Expected csv.ParseError in column 2, got: %d", actualErr.Column) + } + default: + t.Fatalf("incorrect error type: %T", err) + } + +} + +func Test_readTo_complex_embed(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + d := &decoder{in: b} + + var samples []SkipFieldSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +func Test_readEach(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + d := &decoder{in: b} + + c := make(chan SkipFieldSample) + var samples []SkipFieldSample + go func() { + if err := readEach(d, c); err != nil { + t.Fatal(err) + } + }() + for v := range c { + samples = append(samples, v) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + 
Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +func Test_maybeMissingStructFields(t *testing.T) { + structTags := []fieldInfo{ + {keys: []string{"foo"}}, + {keys: []string{"bar"}}, + {keys: []string{"baz"}}, + } + badHeaders := []string{"hi", "mom", "bacon"} + goodHeaders := []string{"foo", "bar", "baz"} + + // no tags to match, expect no error + if err := maybeMissingStructFields([]fieldInfo{}, goodHeaders); err != nil { + t.Fatal(err) + } + + // bad headers, expect an error + if err := maybeMissingStructFields(structTags, badHeaders); err == nil { + t.Fatal("expected an error, but no error found") + } + + // good headers, expect no error + if err := maybeMissingStructFields(structTags, goodHeaders); err != nil { + t.Fatal(err) + } + + // extra headers, but all structtags match; expect no error + moarHeaders := append(goodHeaders, "qux", "quux", "corge", "grault") + if err := maybeMissingStructFields(structTags, moarHeaders); err != nil { + t.Fatal(err) + } + + // not all structTags match, but there's plenty o' headers; expect + // error + mismatchedHeaders := []string{"foo", "qux", "quux", "corgi"} + if err := maybeMissingStructFields(structTags, mismatchedHeaders); err == nil { + t.Fatal("expected an error, but no error found") + } +} + +func Test_maybeDoubleHeaderNames(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d := &decoder{in: b} + var samples []Sample + + // *** check maybeDoubleHeaderNames + if err := maybeDoubleHeaderNames([]string{"foo", "BAR", "foo"}); err == nil { + t.Fatal("maybeDoubleHeaderNames did not raise an error when a should have.") + } + + // *** check readTo + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + // Double header allowed, value should be of third row + if samples[0].Foo != "baz" { + t.Fatal("Double header allowed, value should be of third row but is not. Function called is readTo.") + } + // Double header not allowed, should fail + FailIfDoubleHeaderNames = true + if err := readTo(d, &samples); err == nil { + t.Fatal("Double header not allowed but no error raised. Function called is readTo.") + } + + // *** check readEach + FailIfDoubleHeaderNames = false + b = bytes.NewBufferString(`foo,BAR,foo + f,1,baz + e,3,b`) + d = &decoder{in: b} + samples = samples[:0] + c := make(chan Sample) + go func() { + if err := readEach(d, c); err != nil { + t.Fatal(err) + } + }() + for v := range c { + samples = append(samples, v) + } + // Double header allowed, value should be of third row + if samples[0].Foo != "baz" { + t.Fatal("Double header allowed, value should be of third row but is not. Function called is readEach.") + } + // Double header not allowed, should fail + FailIfDoubleHeaderNames = true + b = bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d = &decoder{in: b} + c = make(chan Sample) + go func() { + if err := readEach(d, c); err == nil { + t.Fatal("Double header not allowed but no error raised. 
Function called is readEach.") + } + }() + for v := range c { + samples = append(samples, v) + } +} + +func TestUnmarshalToCallback(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + var samples []SkipFieldSample + if err := UnmarshalBytesToCallback(b.Bytes(), func(s SkipFieldSample) { + samples = append(samples, s) + }); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +// TestRenamedTypes tests for unmarshaling functions on redefined basic types. +func TestRenamedTypesUnmarshal(t *testing.T) { + b := bytes.NewBufferString(`foo;bar +1,4;1.5 +2,3;2.4`) + d := &decoder{in: b} + var samples []RenamedSample + + // Set different csv field separator to enable comma in floats + SetCSVReader(func(in io.Reader) *csv.Reader { + csvin := csv.NewReader(in) + csvin.Comma = ';' + return csvin + }) + // Switch back to default for tests executed after this + defer SetCSVReader(DefaultCSVReader) + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].RenamedFloatUnmarshaler != 1.4 { + t.Fatalf("Parsed float value wrong for renamed float64 type. Expected 1.4, got %v.", samples[0].RenamedFloatUnmarshaler) + } + if samples[0].RenamedFloatDefault != 1.5 { + t.Fatalf("Parsed float value wrong for renamed float64 type without an explicit unmarshaler function. Expected 1.5, got %v.", samples[0].RenamedFloatDefault) + } + + // Test that errors raised by UnmarshalCSV are correctly reported + b = bytes.NewBufferString(`foo;bar +4.2;2.4`) + d = &decoder{in: b} + samples = samples[:0] + if perr, _ := readTo(d, &samples).(*csv.ParseError); perr == nil { + t.Fatalf("Expected ParseError, got nil.") + } else if _, ok := perr.Err.(UnmarshalError); !ok { + t.Fatalf("Expected UnmarshalError, got %v", perr.Err) + } +} + +func (rf *RenamedFloat64Unmarshaler) UnmarshalCSV(csv string) (err error) { + // Purely for testing purposes: Raise error on specific string + if csv == "4.2" { + return UnmarshalError{"Test error: Invalid float 4.2"} + } + + // Convert , to . 
before parsing to create valid float strings + converted := strings.Replace(csv, ",", ".", -1) + var f float64 + if f, err = strconv.ParseFloat(converted, 64); err != nil { + return err + } + *rf = RenamedFloat64Unmarshaler(f) + return nil +} + +type UnmarshalError struct { + msg string +} + +func (e UnmarshalError) Error() string { + return e.msg +} + +func TestMultipleStructTags(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + d := &decoder{in: b} + + var samples []MultiTagSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "b" { + t.Fatalf("expected second tag value 'b' in multi tag struct field, got %v", samples[0].Foo) + } + + b = bytes.NewBufferString(`foo,BAR +e,3`) + d = &decoder{in: b} + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "e" { + t.Fatalf("wrong value in multi tag struct field, expected 'e', got %v", samples[0].Foo) + } + + b = bytes.NewBufferString(`BAR,Baz +3,b`) + d = &decoder{in: b} + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "b" { + t.Fatal("wrong value in multi tag struct field") + } +} + +func TestStructTagSeparator(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + d := &decoder{in: b} + + defaultTagSeparator := TagSeparator + TagSeparator = "|" + defer func() { TagSeparator = defaultTagSeparator }() + + var samples []TagSeparatorSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + + if samples[0].Foo != "b" { + t.Fatal("expected second tag value in multi tag struct field.") + } +} diff --git a/vendor/github.com/gocarina/gocsv/encode.go b/vendor/github.com/gocarina/gocsv/encode.go new file mode 100644 index 000000000000..52dd0f722aa8 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/encode.go @@ -0,0 +1,135 @@ +package gocsv + +import ( + "encoding/csv" + "fmt" + "io" + "reflect" +) + +type encoder struct { + out io.Writer +} + +func newEncoder(out io.Writer) *encoder { + return &encoder{out} +} + +func writeFromChan(writer *csv.Writer, c <-chan interface{}) error { + // Get the first value. It wil determine the header structure. 
+ firstValue := <-c + inValue, inType := getConcreteReflectValueAndType(firstValue) // Get the concrete type + if err := ensureStructOrPtr(inType); err != nil { + return err + } + inInnerWasPointer := inType.Kind() == reflect.Ptr + inInnerStructInfo := getStructInfo(inType) // Get the inner struct info to get CSV annotations + csvHeadersLabels := make([]string, len(inInnerStructInfo.Fields)) + for i, fieldInfo := range inInnerStructInfo.Fields { // Used to write the header (first line) in CSV + csvHeadersLabels[i] = fieldInfo.getFirstKey() + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + write := func(val reflect.Value) error { + for j, fieldInfo := range inInnerStructInfo.Fields { + csvHeadersLabels[j] = "" + inInnerFieldValue, err := getInnerField(val, inInnerWasPointer, fieldInfo.IndexChain) // Get the correct field header <-> position + if err != nil { + return err + } + csvHeadersLabels[j] = inInnerFieldValue + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + return nil + } + if err := write(inValue); err != nil { + return err + } + for v := range c { + val, _ := getConcreteReflectValueAndType(v) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureStructOrPtr(inType); err != nil { + return err + } + if err := write(val); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func writeTo(writer *csv.Writer, in interface{}) error { + inValue, inType := getConcreteReflectValueAndType(in) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureInType(inType); err != nil { + return err + } + inInnerWasPointer, inInnerType := getConcreteContainerInnerType(inType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureInInnerType(inInnerType); err != nil { + return err + } + inInnerStructInfo := getStructInfo(inInnerType) // Get the inner struct info to get CSV annotations + csvHeadersLabels := make([]string, len(inInnerStructInfo.Fields)) + for i, fieldInfo := range inInnerStructInfo.Fields { // Used to write the header (first line) in CSV + csvHeadersLabels[i] = fieldInfo.getFirstKey() + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + inLen := inValue.Len() + for i := 0; i < inLen; i++ { // Iterate over container rows + for j, fieldInfo := range inInnerStructInfo.Fields { + csvHeadersLabels[j] = "" + inInnerFieldValue, err := getInnerField(inValue.Index(i), inInnerWasPointer, fieldInfo.IndexChain) // Get the correct field header <-> position + if err != nil { + return err + } + csvHeadersLabels[j] = inInnerFieldValue + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func ensureStructOrPtr(t reflect.Type) error { + switch t.Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + return nil + } + return fmt.Errorf("cannot use " + t.String() + ", only slice or array supported") +} + +// Check if the inType is an array or a slice +func ensureInType(outType reflect.Type) error { + switch outType.Kind() { + case reflect.Slice: + fallthrough + case reflect.Array: + return nil + } + return fmt.Errorf("cannot use " + outType.String() + ", only slice or array supported") +} + +// Check if the inInnerType is of type struct +func ensureInInnerType(outInnerType reflect.Type) error { + switch outInnerType.Kind() { + case reflect.Struct: + return nil + } + return fmt.Errorf("cannot use " + outInnerType.String() + ", only struct 
supported") +} + +func getInnerField(outInner reflect.Value, outInnerWasPointer bool, index []int) (string, error) { + oi := outInner + if outInnerWasPointer { + oi = outInner.Elem() + } + return getFieldAsString(oi.FieldByIndex(index)) +} diff --git a/vendor/github.com/gocarina/gocsv/encode_test.go b/vendor/github.com/gocarina/gocsv/encode_test.go new file mode 100644 index 000000000000..1bd9e12a37b9 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/encode_test.go @@ -0,0 +1,214 @@ +package gocsv + +import ( + "bytes" + "encoding/csv" + "io" + "math" + "strconv" + "strings" + "testing" +) + +func assertLine(t *testing.T, expected, actual []string) { + if len(expected) != len(actual) { + t.Fatalf("line length mismatch between expected: %d and actual: %d", len(expected), len(actual)) + } + for i := range expected { + if expected[i] != actual[i] { + t.Fatalf("mismatch on field %d at line `%s`: %s != %s", i, expected, expected[i], actual[i]) + } + } +} + +func Test_writeTo(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + s := []Sample{ + {Foo: "f", Bar: 1, Baz: "baz", Frop: 0.1, Blah: &blah}, + {Foo: "e", Bar: 3, Baz: "b", Frop: 6.0 / 13, Blah: nil}, + } + if err := writeTo(csv.NewWriter(e.out), s); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 3 { + t.Fatalf("expected 3 lines, got %d", len(lines)) + } + assertLine(t, []string{"foo", "BAR", "Baz", "Quux", "Blah"}, lines[0]) + assertLine(t, []string{"f", "1", "baz", "0.1", "2"}, lines[1]) + assertLine(t, []string{"e", "3", "b", "0.46153846153846156", ""}, lines[2]) +} + +func Test_writeTo_multipleTags(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + s := []MultiTagSample{ + {Foo: "abc", Bar: 123}, + {Foo: "def", Bar: 234}, + } + if err := writeTo(csv.NewWriter(e.out), s); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 3 { + t.Fatalf("expected 3 lines, got %d", len(lines)) + } + // the first tag for each field is the encoding CSV header + assertLine(t, []string{"Baz", "BAR"}, lines[0]) + assertLine(t, []string{"abc", "123"}, lines[1]) + assertLine(t, []string{"def", "234"}, lines[2]) +} + +func Test_writeTo_embed(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + s := []EmbedSample{ + { + Qux: "aaa", + Sample: Sample{Foo: "f", Bar: 1, Baz: "baz", Frop: 0.2, Blah: &blah}, + Ignore: "shouldn't be marshalled", + Quux: "zzz", + Grault: math.Pi, + }, + } + if err := writeTo(csv.NewWriter(e.out), s); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"first", "foo", "BAR", "Baz", "Quux", "Blah", "garply", "last"}, lines[0]) + assertLine(t, []string{"aaa", "f", "1", "baz", "0.2", "2", "3.141592653589793", "zzz"}, lines[1]) +} + +func Test_writeTo_complex_embed(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + sfs := []SkipFieldSample{ + { + EmbedSample: EmbedSample{ + Qux: "aaa", + Sample: Sample{ + Foo: "bbb", + Bar: 111, + Baz: "ddd", + Frop: 1.2e22, + Blah: nil, + }, + Ignore: "eee", + Grault: 0.1, + Quux: "fff", + }, + MoreIgnore: "ggg", + Corge: "hhh", + }, + } + if err := writeTo(csv.NewWriter(e.out), sfs); err != nil { + t.Fatal(err) + } + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if 
len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"first", "foo", "BAR", "Baz", "Quux", "Blah", "garply", "last", "abc"}, lines[0]) + assertLine(t, []string{"aaa", "bbb", "111", "ddd", "12000000000000000000000", "", "0.1", "fff", "hhh"}, lines[1]) +} + +func Test_writeToChan(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + c := make(chan interface{}) + go func() { + for i := 0; i < 100; i++ { + v := Sample{Foo: "f", Bar: i, Baz: "baz" + strconv.Itoa(i), Frop: float64(i), Blah: nil} + c <- v + } + close(c) + }() + if err := MarshalChan(c, csv.NewWriter(e.out)); err != nil { + t.Fatal(err) + } + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 101 { + t.Fatalf("expected 100 lines, got %d", len(lines)) + } + for i, l := range lines { + if i == 0 { + assertLine(t, []string{"foo", "BAR", "Baz", "Quux", "Blah"}, l) + continue + } + assertLine(t, []string{"f", strconv.Itoa(i - 1), "baz" + strconv.Itoa(i-1), strconv.FormatFloat(float64(i-1), 'f', -1, 64), ""}, l) + } +} + +// TestRenamedTypes tests for marshaling functions on redefined basic types. +func TestRenamedTypesMarshal(t *testing.T) { + samples := []RenamedSample{ + {RenamedFloatUnmarshaler: 1.4, RenamedFloatDefault: 1.5}, + {RenamedFloatUnmarshaler: 2.3, RenamedFloatDefault: 2.4}, + } + + SetCSVWriter(func(out io.Writer) *csv.Writer { + csvout := csv.NewWriter(out) + csvout.Comma = ';' + return csvout + }) + // Switch back to default for tests executed after this + defer SetCSVWriter(DefaultCSVWriter) + + csvContent, err := MarshalString(&samples) + if err != nil { + t.Fatal(err) + } + if csvContent != "foo;bar\n1,4;1.5\n2,3;2.4\n" { + t.Fatalf("Error marshaling floats with , as separator. 
Expected \nfoo;bar\n1,4;1.5\n2,3;2.4\ngot:\n%v", csvContent) + } + + // Test that errors raised by MarshalCSV are correctly reported + samples = []RenamedSample{ + {RenamedFloatUnmarshaler: 4.2, RenamedFloatDefault: 1.5}, + } + _, err = MarshalString(&samples) + if _, ok := err.(MarshalError); !ok { + t.Fatalf("Expected UnmarshalError, got %v", err) + } +} + +func (rf *RenamedFloat64Unmarshaler) MarshalCSV() (csv string, err error) { + if *rf == RenamedFloat64Unmarshaler(4.2) { + return "", MarshalError{"Test error: Invalid float 4.2"} + } + csv = strconv.FormatFloat(float64(*rf), 'f', 1, 64) + csv = strings.Replace(csv, ".", ",", -1) + return csv, nil +} + +type MarshalError struct { + msg string +} + +func (e MarshalError) Error() string { + return e.msg +} diff --git a/vendor/github.com/gocarina/gocsv/reflect.go b/vendor/github.com/gocarina/gocsv/reflect.go new file mode 100644 index 000000000000..e96fb57a8f31 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/reflect.go @@ -0,0 +1,104 @@ +package gocsv + +import ( + "reflect" + "strings" + "sync" +) + +// -------------------------------------------------------------------------- +// Reflection helpers + +type structInfo struct { + Fields []fieldInfo +} + +// fieldInfo is a struct field that should be mapped to a CSV column, or vica-versa +// Each IndexChain element before the last is the index of an the embedded struct field +// that defines Key as a tag +type fieldInfo struct { + keys []string + IndexChain []int +} + +func (f fieldInfo) getFirstKey() string { + return f.keys[0] +} + +func (f fieldInfo) matchesKey(key string) bool { + for _, k := range f.keys { + if key == k { + return true + } + } + return false +} + +var structMap = make(map[reflect.Type]*structInfo) +var structMapMutex sync.RWMutex + +func getStructInfo(rType reflect.Type) *structInfo { + structMapMutex.RLock() + stInfo, ok := structMap[rType] + structMapMutex.RUnlock() + if ok { + return stInfo + } + fieldsList := getFieldInfos(rType, []int{}) + stInfo = &structInfo{fieldsList} + return stInfo +} + +func getFieldInfos(rType reflect.Type, parentIndexChain []int) []fieldInfo { + fieldsCount := rType.NumField() + fieldsList := make([]fieldInfo, 0, fieldsCount) + for i := 0; i < fieldsCount; i++ { + field := rType.Field(i) + if field.PkgPath != "" { + continue + } + indexChain := append(parentIndexChain, i) + // if the field is an embedded struct, create a fieldInfo for each of its fields + if field.Anonymous && field.Type.Kind() == reflect.Struct { + fieldsList = append(fieldsList, getFieldInfos(field.Type, indexChain)...) 
+			continue
+		}
+		fieldInfo := fieldInfo{IndexChain: indexChain}
+		fieldTag := field.Tag.Get("csv")
+		fieldTags := strings.Split(fieldTag, TagSeparator)
+		filteredTags := []string{}
+		for _, fieldTagEntry := range fieldTags {
+			if fieldTagEntry != "omitempty" {
+				filteredTags = append(filteredTags, fieldTagEntry)
+			}
+		}
+
+		if len(filteredTags) == 1 && filteredTags[0] == "-" {
+			continue
+		} else if len(filteredTags) > 0 && filteredTags[0] != "" {
+			fieldInfo.keys = filteredTags
+		} else {
+			fieldInfo.keys = []string{field.Name}
+		}
+		fieldsList = append(fieldsList, fieldInfo)
+	}
+	return fieldsList
+}
+
+func getConcreteContainerInnerType(in reflect.Type) (inInnerWasPointer bool, inInnerType reflect.Type) {
+	inInnerType = in.Elem()
+	inInnerWasPointer = false
+	if inInnerType.Kind() == reflect.Ptr {
+		inInnerWasPointer = true
+		inInnerType = inInnerType.Elem()
+	}
+	return inInnerWasPointer, inInnerType
+}
+
+func getConcreteReflectValueAndType(in interface{}) (reflect.Value, reflect.Type) {
+	value := reflect.ValueOf(in)
+	if value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value, value.Type()
+}
diff --git a/vendor/github.com/gocarina/gocsv/sample_structs_test.go b/vendor/github.com/gocarina/gocsv/sample_structs_test.go
new file mode 100644
index 000000000000..90dccbc04305
--- /dev/null
+++ b/vendor/github.com/gocarina/gocsv/sample_structs_test.go
@@ -0,0 +1,42 @@
+package gocsv
+
+type Sample struct {
+	Foo  string  `csv:"foo"`
+	Bar  int     `csv:"BAR"`
+	Baz  string  `csv:"Baz"`
+	Frop float64 `csv:"Quux"`
+	Blah *int    `csv:"Blah"`
+}
+
+type EmbedSample struct {
+	Qux string `csv:"first"`
+	Sample
+	Ignore string  `csv:"-"`
+	Grault float64 `csv:"garply"`
+	Quux   string  `csv:"last"`
+}
+
+type SkipFieldSample struct {
+	EmbedSample
+	MoreIgnore string `csv:"-"`
+	Corge      string `csv:"abc"`
+}
+
+// Test types for unmarshal/marshal functions on renamed basic types
+type RenamedFloat64Unmarshaler float64
+type RenamedFloat64Default float64
+
+type RenamedSample struct {
+	RenamedFloatUnmarshaler RenamedFloat64Unmarshaler `csv:"foo"`
+	RenamedFloatDefault     RenamedFloat64Default     `csv:"bar"`
+}
+
+type MultiTagSample struct {
+	Foo string `csv:"Baz,foo"`
+	Bar int    `csv:"BAR"`
+}
+
+type TagSeparatorSample struct {
+	Foo string `csv:"Baz|foo"`
+	Bar int    `csv:"BAR"`
+}
diff --git a/vendor/github.com/gocarina/gocsv/types.go b/vendor/github.com/gocarina/gocsv/types.go
new file mode 100644
index 000000000000..3af1ad30a111
--- /dev/null
+++ b/vendor/github.com/gocarina/gocsv/types.go
@@ -0,0 +1,430 @@
+package gocsv
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// --------------------------------------------------------------------------
+// Conversion interfaces
+
+// TypeMarshaller is implemented by any value that has a MarshalCSV method.
+// This converter is used to convert the value to its string representation
+type TypeMarshaller interface {
+	MarshalCSV() (string, error)
+}
+
+// Stringer is implemented by any value that has a String method.
+// This converter is used to convert the value to its string representation.
+// This converter will be used if your value does not implement TypeMarshaller
+type Stringer interface {
+	String() string
+}
+
+// TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method.
+// This converter is used to convert a string to your value representation of that string
+type TypeUnmarshaller interface {
+	UnmarshalCSV(string) error
+}
+
+// NoUnmarshalFuncError is the custom error type to be raised in case there is 
no unmarshal function defined on type +type NoUnmarshalFuncError struct { + msg string +} + +func (e NoUnmarshalFuncError) Error() string { + return e.msg +} + +// NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type +type NoMarshalFuncError struct { + msg string +} + +func (e NoMarshalFuncError) Error() string { + return e.msg +} + +var ( + stringerType = reflect.TypeOf((*Stringer)(nil)).Elem() + marshallerType = reflect.TypeOf((*TypeMarshaller)(nil)).Elem() + unMarshallerType = reflect.TypeOf((*TypeUnmarshaller)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnMarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// -------------------------------------------------------------------------- +// Conversion helpers + +func toString(in interface{}) (string, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + return inValue.String(), nil + case reflect.Bool: + b := inValue.Bool() + if b { + return "true", nil + } + return "false", nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", inValue.Uint()), nil + case reflect.Float32: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil + case reflect.Float64: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil + } + return "", fmt.Errorf("No known conversion from " + inValue.Type().String() + " to string") +} + +func toBool(in interface{}) (bool, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := inValue.String() + switch s { + case "yes": + return true, nil + case "no", "": + return false, nil + default: + return strconv.ParseBool(s) + } + case reflect.Bool: + return inValue.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := inValue.Int() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := inValue.Uint() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Float32, reflect.Float64: + f := inValue.Float() + if f != 0 { + return true, nil + } + return false, nil + } + return false, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to bool") +} + +func toInt(in interface{}) (int64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseInt(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return inValue.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return int64(inValue.Float()), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to int") +} + +func toUint(in interface{}) (uint64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + + // support the float input + if strings.Contains(s, ".") { + f, err := 
strconv.ParseFloat(s, 64) + if err != nil { + return 0, err + } + return uint64(f), nil + } + return strconv.ParseUint(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return uint64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return inValue.Uint(), nil + case reflect.Float32, reflect.Float64: + return uint64(inValue.Float()), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to uint") +} + +func toFloat(in interface{}) (float64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseFloat(s, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return inValue.Float(), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to float") +} + +func setField(field reflect.Value, value string) error { + switch field.Interface().(type) { + case string: + s, err := toString(value) + if err != nil { + return err + } + field.SetString(s) + case bool: + b, err := toBool(value) + if err != nil { + return err + } + field.SetBool(b) + case int, int8, int16, int32, int64: + i, err := toInt(value) + if err != nil { + return err + } + field.SetInt(i) + case uint, uint8, uint16, uint32, uint64: + ui, err := toUint(value) + if err != nil { + return err + } + field.SetUint(ui) + case float32, float64: + f, err := toFloat(value) + if err != nil { + return err + } + field.SetFloat(f) + default: + // Not a native type, check for unmarshal method + if err := unmarshall(field, value); err != nil { + if _, ok := err.(NoUnmarshalFuncError); !ok { + return err + } + // Could not unmarshal, check for kind, e.g. 
renamed type from basic type
+			switch field.Kind() {
+			case reflect.String:
+				s, err := toString(value)
+				if err != nil {
+					return err
+				}
+				field.SetString(s)
+			case reflect.Bool:
+				b, err := toBool(value)
+				if err != nil {
+					return err
+				}
+				field.SetBool(b)
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				i, err := toInt(value)
+				if err != nil {
+					return err
+				}
+				field.SetInt(i)
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+				ui, err := toUint(value)
+				if err != nil {
+					return err
+				}
+				field.SetUint(ui)
+			case reflect.Float32, reflect.Float64:
+				f, err := toFloat(value)
+				if err != nil {
+					return err
+				}
+				field.SetFloat(f)
+			default:
+				return err
+			}
+		} else {
+			return nil
+		}
+	}
+	return nil
+}
+
+func getFieldAsString(field reflect.Value) (str string, err error) {
+	switch field.Kind() {
+	case reflect.Interface:
+	case reflect.Ptr:
+		if field.IsNil() {
+			return "", nil
+		}
+		return getFieldAsString(field.Elem())
+	default:
+		// Check if field is go native type
+		switch field.Interface().(type) {
+		case string:
+			return field.String(), nil
+		case bool:
+			str, err = toString(field.Bool())
+			if err != nil {
+				return str, err
+			}
+		case int, int8, int16, int32, int64:
+			str, err = toString(field.Int())
+			if err != nil {
+				return str, err
+			}
+		case uint, uint8, uint16, uint32, uint64:
+			str, err = toString(field.Uint())
+			if err != nil {
+				return str, err
+			}
+		case float32:
+			str, err = toString(float32(field.Float()))
+			if err != nil {
+				return str, err
+			}
+		case float64:
+			str, err = toString(field.Float())
+			if err != nil {
+				return str, err
+			}
+		default:
+			// Not a native type, check for marshal method
+			str, err = marshall(field)
+			if err != nil {
+				if _, ok := err.(NoMarshalFuncError); !ok {
+					return str, err
+				}
+				// If there is no marshal method, check whether the field is
+				// compatible with, or renamed from, a native type
+				switch field.Kind() {
+				case reflect.String:
+					return field.String(), nil
+				case reflect.Bool:
+					str, err = toString(field.Bool())
+					if err != nil {
+						return str, err
+					}
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+					str, err = toString(field.Int())
+					if err != nil {
+						return str, err
+					}
+				case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					str, err = toString(field.Uint())
+					if err != nil {
+						return str, err
+					}
+				case reflect.Float32:
+					str, err = toString(float32(field.Float()))
+					if err != nil {
+						return str, err
+					}
+				case reflect.Float64:
+					str, err = toString(field.Float())
+					if err != nil {
+						return str, err
+					}
+				}
+			} else {
+				return str, nil
+			}
+		}
+	}
+	return str, nil
+}
+
+// --------------------------------------------------------------------------
+// Un/serialization helpers
+
+func unmarshall(field reflect.Value, value string) error {
+	dupField := field
+	unMarshallIt := func(finalField reflect.Value) error {
+		if finalField.CanInterface() && finalField.Type().Implements(unMarshallerType) {
+			if err := finalField.Interface().(TypeUnmarshaller).UnmarshalCSV(value); err != nil {
+				return err
+			}
+			return nil
+		} else if finalField.CanInterface() && finalField.Type().Implements(textUnMarshalerType) { // Otherwise try to use encoding.TextUnmarshaler
+			if err := finalField.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil {
+				return err
+			}
+			return nil
+		}
+
+		return NoUnmarshalFuncError{"No known conversion from string to " + field.Type().String() + ", " + field.Type().String() + " does not implement TypeUnmarshaller"}
+	}
+	for dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {
+		if dupField.IsNil() {
+			dupField = reflect.New(field.Type().Elem())
+			field.Set(dupField)
+			return unMarshallIt(dupField)
+		}
+		dupField = dupField.Elem()
+	}
+	if dupField.CanAddr() {
+		return unMarshallIt(dupField.Addr())
+	}
+	return NoUnmarshalFuncError{"No known conversion from string to " + field.Type().String() + ", " + field.Type().String() + " does not implement TypeUnmarshaller"}
+}
+
+func marshall(field reflect.Value) (value string, err error) {
+	dupField := field
+	marshallIt := func(finalField reflect.Value) (string, error) {
+		if finalField.CanInterface() && finalField.Type().Implements(marshallerType) { // Use TypeMarshaller when possible
+			return finalField.Interface().(TypeMarshaller).MarshalCSV()
+		} else if finalField.CanInterface() && finalField.Type().Implements(stringerType) { // Otherwise try to use Stringer
+			return finalField.Interface().(Stringer).String(), nil
+		} else if finalField.CanInterface() && finalField.Type().Implements(textMarshalerType) { // Otherwise try to use encoding.TextMarshaler
+			text, err := finalField.Interface().(encoding.TextMarshaler).MarshalText()
+			return string(text), err
+		}
+
+		return value, NoMarshalFuncError{"No known conversion from " + field.Type().String() + " to string, " + field.Type().String() + " does not implement TypeMarshaller or Stringer"}
+	}
+	for dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {
+		if dupField.IsNil() {
+			return value, nil
+		}
+		dupField = dupField.Elem()
+	}
+	if dupField.CanAddr() {
+		return marshallIt(dupField.Addr())
+	}
+	return value, NoMarshalFuncError{"No known conversion from " + field.Type().String() + " to string, " + field.Type().String() + " does not implement TypeMarshaller or Stringer"}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 000000000000..f9c841a51e0d
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000000..659d6885fc7e --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000000..115ae67c1157 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,154 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
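+//
+// A DecodeHookFuncType hook receives the from/to reflect.Types unchanged,
+// while a DecodeHookFuncKind hook is invoked with from.Kind() and to.Kind().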
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from kind to be correct with the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
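+//
+// A sketch of typical wiring (DecoderConfig is defined in mapstructure.go;
+// result stands in for your own target variable):
+//
+//	config := &DecoderConfig{
+//		DecodeHook: StringToTimeDurationHookFunc(),
+//		Result:     &result,
+//	}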
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go new file mode 100644 index 000000000000..53289afcfbf6 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -0,0 +1,229 @@ +package mapstructure + +import ( + "errors" + "reflect" + "testing" + "time" +) + +func TestComposeDecodeHookFunc(t *testing.T) { + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "foo", nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "bar", nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + result, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if result.(string) != "foobar" { + t.Fatalf("bad: %#v", result) + } +} + +func TestComposeDecodeHookFunc_err(t *testing.T) { + f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + return nil, errors.New("foo") + } + + f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + panic("NOPE") + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) + if err.Error() != "foo" { + t.Fatalf("bad: %s", err) + } +} + +func TestComposeDecodeHookFunc_kinds(t *testing.T) { + var f2From reflect.Kind + + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return int(42), nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + f2From = f + return data, nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if f2From != reflect.Int { + t.Fatalf("bad: %#v", f2From) + } +} + +func TestStringToSliceHookFunc(t *testing.T) { + f := StringToSliceHookFunc(",") + + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {sliceType, sliceType, 42, 42, false}, + {strType, strType, 42, 42, false}, + { + strType, + sliceType, + "foo,bar,baz", + []string{"foo", "bar", "baz"}, + false, + }, + { + strType, + sliceType, + "", + []string{}, + false, + }, + } + + for 
i, tc := range cases {
+		actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+		if tc.err != (err != nil) {
+			t.Fatalf("case %d: expected err %#v", i, tc.err)
+		}
+		if !reflect.DeepEqual(actual, tc.result) {
+			t.Fatalf(
+				"case %d: expected %#v, got %#v",
+				i, tc.result, actual)
+		}
+	}
+}
+
+func TestStringToTimeDurationHookFunc(t *testing.T) {
+	f := StringToTimeDurationHookFunc()
+
+	strType := reflect.TypeOf("")
+	timeType := reflect.TypeOf(time.Duration(5))
+	cases := []struct {
+		f, t   reflect.Type
+		data   interface{}
+		result interface{}
+		err    bool
+	}{
+		{strType, timeType, "5s", 5 * time.Second, false},
+		{strType, timeType, "5", time.Duration(0), true},
+		{strType, strType, "5", "5", false},
+	}
+
+	for i, tc := range cases {
+		actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+		if tc.err != (err != nil) {
+			t.Fatalf("case %d: expected err %#v", i, tc.err)
+		}
+		if !reflect.DeepEqual(actual, tc.result) {
+			t.Fatalf(
+				"case %d: expected %#v, got %#v",
+				i, tc.result, actual)
+		}
+	}
+}
+
+func TestWeaklyTypedHook(t *testing.T) {
+	var f DecodeHookFunc = WeaklyTypedHook
+
+	boolType := reflect.TypeOf(true)
+	strType := reflect.TypeOf("")
+	sliceType := reflect.TypeOf([]byte(""))
+	cases := []struct {
+		f, t   reflect.Type
+		data   interface{}
+		result interface{}
+		err    bool
+	}{
+		// TO STRING
+		{
+			boolType,
+			strType,
+			false,
+			"0",
+			false,
+		},
+
+		{
+			boolType,
+			strType,
+			true,
+			"1",
+			false,
+		},
+
+		{
+			reflect.TypeOf(float32(1)),
+			strType,
+			float32(7),
+			"7",
+			false,
+		},
+
+		{
+			reflect.TypeOf(int(1)),
+			strType,
+			int(7),
+			"7",
+			false,
+		},
+
+		{
+			sliceType,
+			strType,
+			[]uint8("foo"),
+			"foo",
+			false,
+		},
+
+		{
+			reflect.TypeOf(uint(1)),
+			strType,
+			uint(7),
+			"7",
+			false,
+		},
+	}
+
+	for i, tc := range cases {
+		actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+		if tc.err != (err != nil) {
+			t.Fatalf("case %d: expected err %#v", i, tc.err)
+		}
+		if !reflect.DeepEqual(actual, tc.result) {
+			t.Fatalf(
+				"case %d: expected %#v, got %#v",
+				i, tc.result, actual)
+		}
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 000000000000..47a99e5af3f1
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
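+	// a plain error contributes a single message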
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 000000000000..a554e799bb7f
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,790 @@
+// The mapstructure package exposes functionality to convert an
+// arbitrary map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	//   - bools to string (true = "1", false = "0")
+	//   - numbers to string (base 10)
+	//   - bools to int/uint (true = 1, false = 0)
+	//   - strings to int/uint (base implied by prefix)
+	//   - int to bool (true if value != 0)
+	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//     FALSE, false, False. Anything else is an error)
+	//   - empty array = empty map and vice versa
+	//   - negative numbers to overflowed uint values (base 10)
+	//   - slice of maps to a merged map
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names. 
This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. 
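+		// Note that the hook may return data of a different type; decoding
+		// below continues with the returned value rather than the raw input.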
+ var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := 
strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": 
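+		// json.Number (as produced by a json.Decoder with UseNumber set)
+		// carries the numeric text; convert it via its Float64 method.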
+ jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // In weak mode, we accept a slice of maps as an input... + if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } + } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + realVal := reflect.New(valElemType) + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. 
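+	// Length and capacity both equal dataVal.Len(); elements are decoded
+	// in place via valSlice.Index(i) below.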
+ valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + if fieldType.Anonymous { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) + continue + } + } + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
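+			// strings.EqualFold below means "name", "Name" and "NAME" in the
+			// input map all match a struct field named Name.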
+ for dataValKey, _ := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey, _ := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey, _ := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go new file mode 100644 index 000000000000..41d2a41f7546 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding/json" + "testing" +) + +func Benchmark_Decode(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the +// given Go native structure pointed to by v. v must be a pointer to a struct. +func decodeViaJSON(data interface{}, v interface{}) error { + // Perform the task by simply marshalling the input into JSON, + // then unmarshalling it into target native Go struct. 
+ b, err := json.Marshal(data) + if err != nil { + return err + } + return json.Unmarshal(b, v) +} + +func Benchmark_DecodeViaJSON(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + decodeViaJSON(input, &result) + } +} + +func Benchmark_DecodeBasic(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeTypeConversion(b *testing.B) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + var resultStrict TypeConversionResult + for i := 0; i < b.N; i++ { + Decode(input, &resultStrict) + } +} + +func Benchmark_DecodeMap(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeMapOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSlice(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + var result Slice + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSliceOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. 
+ input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadata(b *testing.B) { + type Person struct { + Name string + Age int + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadataEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + b.Fatalf("err: %s", err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeTagged(b *testing.B) { + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go new file mode 100644 index 000000000000..7054f1ac9abc --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -0,0 +1,47 @@ +package mapstructure + +import "testing" + +// GH-1 +func TestDecode_NilValue(t *testing.T) { + input := map[string]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} + +// GH-10 +func TestDecode_mapInterfaceInterface(t *testing.T) { + input := map[interface{}]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go new file mode 100644 index 000000000000..f17c214a8a95 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go @@ -0,0 +1,203 @@ +package mapstructure + +import ( + "fmt" +) + +func ExampleDecode() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. 
+	input := map[string]interface{}{
+		"name":   "Mitchell",
+		"age":    91,
+		"emails": []string{"one", "two", "three"},
+		"extra": map[string]string{
+			"twitter": "mitchellh",
+		},
+	}
+
+	var result Person
+	err := Decode(input, &result)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%#v", result)
+	// Output:
+	// mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}}
+}
+
+func ExampleDecode_errors() {
+	type Person struct {
+		Name   string
+		Age    int
+		Emails []string
+		Extra  map[string]string
+	}
+
+	// This input can come from anywhere, but typically comes from
+	// something like decoding JSON where we're not quite sure of the
+	// struct initially.
+	input := map[string]interface{}{
+		"name":   123,
+		"age":    "bad value",
+		"emails": []int{1, 2, 3},
+	}
+
+	var result Person
+	err := Decode(input, &result)
+	if err == nil {
+		panic("should have an error")
+	}
+
+	fmt.Println(err.Error())
+	// Output:
+	// 5 error(s) decoding:
+	//
+	// * 'Age' expected type 'int', got unconvertible type 'string'
+	// * 'Emails[0]' expected type 'string', got unconvertible type 'int'
+	// * 'Emails[1]' expected type 'string', got unconvertible type 'int'
+	// * 'Emails[2]' expected type 'string', got unconvertible type 'int'
+	// * 'Name' expected type 'string', got unconvertible type 'int'
+}
+
+func ExampleDecode_metadata() {
+	type Person struct {
+		Name string
+		Age  int
+	}
+
+	// This input can come from anywhere, but typically comes from
+	// something like decoding JSON where we're not quite sure of the
+	// struct initially.
+	input := map[string]interface{}{
+		"name":  "Mitchell",
+		"age":   91,
+		"email": "foo@bar.com",
+	}
+
+	// For metadata, we make a more advanced DecoderConfig so we can
+	// more finely configure the decoder that is used. In this case, we
+	// just tell the decoder we want to track metadata.
+	var md Metadata
+	var result Person
+	config := &DecoderConfig{
+		Metadata: &md,
+		Result:   &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		panic(err)
+	}
+
+	if err := decoder.Decode(input); err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Unused keys: %#v", md.Unused)
+	// Output:
+	// Unused keys: []string{"email"}
+}
+
+func ExampleDecode_weaklyTypedInput() {
+	type Person struct {
+		Name   string
+		Age    int
+		Emails []string
+	}
+
+	// This input can come from anywhere, but typically comes from
+	// something like decoding JSON, generated by a weakly typed language
+	// such as PHP.
+	input := map[string]interface{}{
+		"name":   123,                      // number => string
+		"age":    "42",                     // string => number
+		"emails": map[string]interface{}{}, // empty map => empty array
+	}
+
+	var result Person
+	config := &DecoderConfig{
+		WeaklyTypedInput: true,
+		Result:           &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		panic(err)
+	}
+
+	err = decoder.Decode(input)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%#v", result)
+	// Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}}
+}
+
+func ExampleDecode_tags() {
+	// Note that the mapstructure tags defined in the struct type
+	// can indicate which fields the values are mapped to.
+	type Person struct {
+		Name string `mapstructure:"person_name"`
+		Age  int    `mapstructure:"person_age"`
+	}
+
+	input := map[string]interface{}{
+		"person_name": "Mitchell",
+		"person_age":  91,
+	}
+
+	var result Person
+	err := Decode(input, &result)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%#v", result)
+	// Output:
+	// mapstructure.Person{Name:"Mitchell", Age:91}
+}
+
+func ExampleDecode_embeddedStruct() {
+	// Squashing multiple embedded structs is allowed using the squash tag.
+	// This is demonstrated by creating a composite struct of multiple types
+	// and decoding into it. In this case, a person can carry with it both
+	// a Family and a Location, as well as their own FirstName.
+	type Family struct {
+		LastName string
+	}
+	type Location struct {
+		City string
+	}
+	type Person struct {
+		Family    `mapstructure:",squash"`
+		Location  `mapstructure:",squash"`
+		FirstName string
+	}
+
+	input := map[string]interface{}{
+		"FirstName": "Mitchell",
+		"LastName":  "Hashimoto",
+		"City":      "San Francisco",
+	}
+
+	var result Person
+	err := Decode(input, &result)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City)
+	// Output:
+	// Mitchell Hashimoto, San Francisco
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
new file mode 100644
index 000000000000..ea2192414540
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
@@ -0,0 +1,1107 @@
+package mapstructure
+
+import (
+	"encoding/json"
+	"io"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+)
+
+type Basic struct {
+	Vstring     string
+	Vint        int
+	Vuint       uint
+	Vbool       bool
+	Vfloat      float64
+	Vextra      string
+	vsilent     bool
+	Vdata       interface{}
+	VjsonInt    int
+	VjsonFloat  float64
+	VjsonNumber json.Number
+}
+
+type BasicSquash struct {
+	Test Basic `mapstructure:",squash"`
+}
+
+type Embedded struct {
+	Basic
+	Vunique string
+}
+
+type EmbeddedPointer struct {
+	*Basic
+	Vunique string
+}
+
+type EmbeddedSquash struct {
+	Basic   `mapstructure:",squash"`
+	Vunique string
+}
+
+type SquashOnNonStructType struct {
+	InvalidSquashType int `mapstructure:",squash"`
+}
+
+type Map struct {
+	Vfoo   string
+	Vother map[string]string
+}
+
+type MapOfStruct struct {
+	Value map[string]Basic
+}
+
+type Nested struct {
+	Vfoo string
+	Vbar Basic
+}
+
+type NestedPointer struct {
+	Vfoo string
+	Vbar *Basic
+}
+
+type NilInterface struct {
+	W io.Writer
+}
+
+type Slice struct {
+	Vfoo string
+	Vbar []string
+}
+
+type SliceOfStruct struct {
+	Value []Basic
+}
+
+type Tagged struct {
+	Extra string `mapstructure:"bar,what,what"`
+	Value string `mapstructure:"foo"`
+}
+
+type TypeConversionResult struct {
+	IntToFloat         float32
+	IntToUint          uint
+	IntToBool          bool
+	IntToString        string
+	UintToInt          int
+	UintToFloat        float32
+	UintToBool         bool
+	UintToString       string
+	BoolToInt          int
+	BoolToUint         uint
+	BoolToFloat        float32
+	BoolToString       string
+	FloatToInt         int
+	FloatToUint        uint
+	FloatToBool        bool
+	FloatToString      string
+	SliceUint8ToString string
+	StringToInt        int
+	StringToUint       uint
+	StringToBool       bool
+	StringToFloat      float32
+	SliceToMap         map[string]interface{}
+	MapToSlice         []interface{}
+}
+
+func TestBasicTypes(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vstring":     "foo",
+		"vint":        42,
+		"Vuint":       42,
+		"vbool":       true,
+		"Vfloat":      42.42,
+		"vsilent":     true,
+		"vdata":       42,
+		"vjsonInt":    json.Number("1234"),
+		"vjsonFloat":  json.Number("1234.5"),
+		"vjsonNumber": json.Number("1234.5"),
json.Number("1234.5"), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Errorf("got an err: %s", err.Error()) + t.FailNow() + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vint) + } + + if result.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vuint) + } + + if result.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbool) + } + + if result.Vfloat != 42.42 { + t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) + } + + if result.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vextra) + } + + if result.vsilent != false { + t.Error("vsilent should not be set, it is unexported") + } + + if result.Vdata != 42 { + t.Error("vdata should be valid") + } + + if result.VjsonInt != 1234 { + t.Errorf("vjsonint value should be 1234: %#v", result.VjsonInt) + } + + if result.VjsonFloat != 1234.5 { + t.Errorf("vjsonfloat value should be 1234.5: %#v", result.VjsonFloat) + } + + if !reflect.DeepEqual(result.VjsonNumber, json.Number("1234.5")) { + t.Errorf("vjsonnumber value should be '1234.5': %T, %#v", result.VjsonNumber, result.VjsonNumber) + } +} + +func TestBasic_IntWithFloat(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": float64(42), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } +} + +func TestBasic_Merge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": 42, + } + + var result Basic + result.Vuint = 100 + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + expected := Basic{ + Vint: 42, + Vuint: 100, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_BasicSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + } + + var result BasicSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Test.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring) + } +} + +func TestDecode_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "innerfoo" { + t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedPointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result EmbeddedPointer + err := Decode(input, &result) + if err == nil { + t.Fatal("should get error") + } +} + +func TestDecode_EmbeddedSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var result EmbeddedSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique 
+	}
+}
+
+func TestDecode_SquashOnNonStructType(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"InvalidSquashType": 42,
+	}
+
+	var result SquashOnNonStructType
+	err := Decode(input, &result)
+	if err == nil {
+		t.Fatal("unexpected success decoding invalid squash field type")
+	} else if !strings.Contains(err.Error(), "unsupported type for squash") {
+		t.Fatalf("unexpected error message for invalid squash field type: %s", err)
+	}
+}
+
+func TestDecode_DecodeHook(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vint": "WHAT",
+	}
+
+	decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) {
+		if from == reflect.String && to != reflect.String {
+			return 5, nil
+		}
+
+		return v, nil
+	}
+
+	var result Basic
+	config := &DecoderConfig{
+		DecodeHook: decodeHook,
+		Result:     &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	err = decoder.Decode(input)
+	if err != nil {
+		t.Fatalf("got an err: %s", err)
+	}
+
+	if result.Vint != 5 {
+		t.Errorf("vint should be 5: %#v", result.Vint)
+	}
+}
+
+func TestDecode_DecodeHookType(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vint": "WHAT",
+	}
+
+	decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) {
+		if from.Kind() == reflect.String &&
+			to.Kind() != reflect.String {
+			return 5, nil
+		}
+
+		return v, nil
+	}
+
+	var result Basic
+	config := &DecoderConfig{
+		DecodeHook: decodeHook,
+		Result:     &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	err = decoder.Decode(input)
+	if err != nil {
+		t.Fatalf("got an err: %s", err)
+	}
+
+	if result.Vint != 5 {
+		t.Errorf("vint should be 5: %#v", result.Vint)
+	}
+}
+
+func TestDecode_Nil(t *testing.T) {
+	t.Parallel()
+
+	var input interface{} = nil
+	result := Basic{
+		Vstring: "foo",
+	}
+
+	err := Decode(input, &result)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if result.Vstring != "foo" {
+		t.Fatalf("bad: %#v", result.Vstring)
+	}
+}
+
+func TestDecode_NilInterfaceHook(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"w": "",
+	}
+
+	decodeHook := func(f, t reflect.Type, v interface{}) (interface{}, error) {
+		if t.String() == "io.Writer" {
+			return nil, nil
+		}
+
+		return v, nil
+	}
+
+	var result NilInterface
+	config := &DecoderConfig{
+		DecodeHook: decodeHook,
+		Result:     &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	err = decoder.Decode(input)
+	if err != nil {
+		t.Fatalf("got an err: %s", err)
+	}
+
+	if result.W != nil {
+		t.Errorf("W should be nil: %#v", result.W)
+	}
+}
+
+func TestDecode_NonStruct(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"foo": "bar",
+		"bar": "baz",
+	}
+
+	var result map[string]string
+	err := Decode(input, &result)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if result["foo"] != "bar" {
+		t.Fatal("foo is not bar")
+	}
+}
+
+func TestDecode_StructMatch(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vbar": Basic{
+			Vstring: "foo",
+		},
+	}
+
+	var result Nested
+	err := Decode(input, &result)
+	if err != nil {
+		t.Fatalf("got an err: %s", err.Error())
+	}
+
+	if result.Vbar.Vstring != "foo" {
+		t.Errorf("bad: %#v", result)
+	}
+}
+
+func TestDecode_TypeConversion(t *testing.T) {
+	input := map[string]interface{}{
+		"IntToFloat":         42,
"IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "SliceUint8ToString": []uint8("foo"), + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + expectedResultStrict := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + UintToInt: 42, + UintToFloat: 42, + BoolToInt: 0, + BoolToUint: 0, + BoolToFloat: 0, + FloatToInt: 42, + FloatToUint: 42, + } + + expectedResultWeak := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + IntToBool: true, + IntToString: "42", + UintToInt: 42, + UintToFloat: 42, + UintToBool: true, + UintToString: "42", + BoolToInt: 1, + BoolToUint: 1, + BoolToFloat: 1, + BoolToString: "1", + FloatToInt: 42, + FloatToUint: 42, + FloatToBool: true, + FloatToString: "42.42", + SliceUint8ToString: "foo", + StringToInt: 42, + StringToUint: 42, + StringToBool: true, + StringToFloat: 42.42, + SliceToMap: map[string]interface{}{}, + MapToSlice: []interface{}{}, + } + + // Test strict type conversion + var resultStrict TypeConversionResult + err := Decode(input, &resultStrict) + if err == nil { + t.Errorf("should return an error") + } + if !reflect.DeepEqual(resultStrict, expectedResultStrict) { + t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) + } + + // Test weak type conversion + var decoder *Decoder + var resultWeak TypeConversionResult + + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &resultWeak, + } + + decoder, err = NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if !reflect.DeepEqual(resultWeak, expectedResultWeak) { + t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) + } +} + +func TestDecoder_ErrorUnused(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "hello", + "foo": "bar", + } + + var result Basic + config := &DecoderConfig{ + ErrorUnused: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err == nil { + t.Fatal("expected error") + } +} + +func TestMap(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vother == nil { + t.Fatal("vother should not be nil") + } + + if len(result.Vother) != 2 { + t.Error("vother should have two items") + } + + if result.Vother["foo"] != "foo" { + t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) + } + + if result.Vother["bar"] != "bar" { + t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) + } +} + +func TestMapMerge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + result.Vother = map[string]string{"hello": "world"} + err := 
Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + expected := map[string]string{ + "foo": "foo", + "bar": "bar", + "hello": "world", + } + if !reflect.DeepEqual(result.Vother, expected) { + t.Errorf("bad: %#v", result.Vother) + } +} + +func TestMapOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Value == nil { + t.Fatal("value should not be nil") + } + + if len(result.Value) != 2 { + t.Error("value should have two items") + } + + if result.Value["foo"].Vstring != "one" { + t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) + } + + if result.Value["bar"].Vstring != "two" { + t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) + } +} + +func TestNestedType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestNestedTypePointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result NestedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestSlice(t *testing.T) { + t.Parallel() + + inputStringSlice := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + inputStringSlicePointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[]string{"foo", "bar", "baz"}, + } + + outputStringSlice := &Slice{ + "foo", + []string{"foo", "bar", "baz"}, + } + + testSliceInput(t, inputStringSlice, outputStringSlice) + testSliceInput(t, inputStringSlicePointer, outputStringSlice) +} + +func TestInvalidSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Slice{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestSliceOfStruct(t *testing.T) 
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"value": []map[string]interface{}{
+			{"vstring": "one"},
+			{"vstring": "two"},
+		},
+	}
+
+	var result SliceOfStruct
+	err := Decode(input, &result)
+	if err != nil {
+		t.Fatalf("got unexpected error: %s", err)
+	}
+
+	if len(result.Value) != 2 {
+		t.Fatalf("expected two values, got %d", len(result.Value))
+	}
+
+	if result.Value[0].Vstring != "one" {
+		t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring)
+	}
+
+	if result.Value[1].Vstring != "two" {
+		t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring)
+	}
+}
+
+func TestSliceToMap(t *testing.T) {
+	t.Parallel()
+
+	input := []map[string]interface{}{
+		map[string]interface{}{
+			"foo": "bar",
+		},
+		map[string]interface{}{
+			"bar": "baz",
+		},
+	}
+
+	var result map[string]interface{}
+	err := WeakDecode(input, &result)
+	if err != nil {
+		t.Fatalf("got an error: %s", err)
+	}
+
+	expected := map[string]interface{}{
+		"foo": "bar",
+		"bar": "baz",
+	}
+	if !reflect.DeepEqual(result, expected) {
+		t.Errorf("bad: %#v", result)
+	}
+}
+
+func TestInvalidType(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vstring": 42,
+	}
+
+	var result Basic
+	err := Decode(input, &result)
+	if err == nil {
+		t.Fatal("error should exist")
+	}
+
+	derr, ok := err.(*Error)
+	if !ok {
+		t.Fatalf("error should be kind of Error, instead: %#v", err)
+	}
+
+	if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" {
+		t.Errorf("got unexpected error: %s", err)
+	}
+
+	inputNegIntUint := map[string]interface{}{
+		"vuint": -42,
+	}
+
+	err = Decode(inputNegIntUint, &result)
+	if err == nil {
+		t.Fatal("error should exist")
+	}
+
+	derr, ok = err.(*Error)
+	if !ok {
+		t.Fatalf("error should be kind of Error, instead: %#v", err)
+	}
+
+	if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" {
+		t.Errorf("got unexpected error: %s", err)
+	}
+
+	inputNegFloatUint := map[string]interface{}{
+		"vuint": -42.0,
+	}
+
+	err = Decode(inputNegFloatUint, &result)
+	if err == nil {
+		t.Fatal("error should exist")
+	}
+
+	derr, ok = err.(*Error)
+	if !ok {
+		t.Fatalf("error should be kind of Error, instead: %#v", err)
+	}
+
+	if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" {
+		t.Errorf("got unexpected error: %s", err)
+	}
+}
+
+func TestMetadata(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vfoo": "foo",
+		"vbar": map[string]interface{}{
+			"vstring": "foo",
+			"Vuint":   42,
+			"foo":     "bar",
+		},
+		"bar": "nil",
+	}
+
+	var md Metadata
+	var result Nested
+	config := &DecoderConfig{
+		Metadata: &md,
+		Result:   &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	err = decoder.Decode(input)
+	if err != nil {
+		t.Fatalf("err: %s", err.Error())
+	}
+
+	expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"}
+	sort.Strings(md.Keys)
+	if !reflect.DeepEqual(md.Keys, expectedKeys) {
+		t.Fatalf("bad keys: %#v", md.Keys)
+	}
+
+	expectedUnused := []string{"Vbar.foo", "bar"}
+	if !reflect.DeepEqual(md.Unused, expectedUnused) {
+		t.Fatalf("bad unused: %#v", md.Unused)
+	}
+}
+
+func TestMetadata_Embedded(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"vstring": "foo",
+		"vunique": "bar",
+	}
+
+	var md Metadata
+	var result EmbeddedSquash
+	config := &DecoderConfig{
+		Metadata: &md,
+		Result:   &result,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	err = decoder.Decode(input)
+	if err != nil {
+		t.Fatalf("err: %s", err.Error())
+	}
+
+	expectedKeys := []string{"Vstring", "Vunique"}
+
+	sort.Strings(md.Keys)
+	if !reflect.DeepEqual(md.Keys, expectedKeys) {
+		t.Fatalf("bad keys: %#v", md.Keys)
+	}
+
+	expectedUnused := []string{}
+	if !reflect.DeepEqual(md.Unused, expectedUnused) {
+		t.Fatalf("bad unused: %#v", md.Unused)
+	}
+}
+
+func TestNonPtrValue(t *testing.T) {
+	t.Parallel()
+
+	err := Decode(map[string]interface{}{}, Basic{})
+	if err == nil {
+		t.Fatal("error should exist")
+	}
+
+	if err.Error() != "result must be a pointer" {
+		t.Errorf("got unexpected error: %s", err)
+	}
+}
+
+func TestTagged(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"foo": "bar",
+		"bar": "value",
+	}
+
+	var result Tagged
+	err := Decode(input, &result)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	if result.Value != "bar" {
+		t.Errorf("value should be 'bar', got: %#v", result.Value)
+	}
+
+	if result.Extra != "value" {
+		t.Errorf("extra should be 'value', got: %#v", result.Extra)
+	}
+}
+
+func TestWeakDecode(t *testing.T) {
+	t.Parallel()
+
+	input := map[string]interface{}{
+		"foo": "4",
+		"bar": "value",
+	}
+
+	var result struct {
+		Foo int
+		Bar string
+	}
+
+	if err := WeakDecode(input, &result); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if result.Foo != 4 {
+		t.Fatalf("bad: %#v", result)
+	}
+	if result.Bar != "value" {
+		t.Fatalf("bad: %#v", result)
+	}
+}
+
+func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) {
+	var result Slice
+	err := Decode(input, &result)
+	if err != nil {
+		t.Fatalf("got error: %s", err)
+	}
+
+	if result.Vfoo != expected.Vfoo {
+		t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo)
+	}
+
+	if result.Vbar == nil {
+		t.Fatalf("Vbar should be a slice, got '%#v'", result.Vbar)
+	}
+
+	if len(result.Vbar) != len(expected.Vbar) {
+		t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar))
+	}
+
+	for i, v := range result.Vbar {
+		if v != expected.Vbar[i] {
+			t.Errorf(
+				"Vbar[%d] should be '%#v', got '%#v'",
+				i, expected.Vbar[i], v)
+		}
+	}
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
new file mode 100644
index 000000000000..64634802f580
--- /dev/null
+++ b/vendor/vendor.json
@@ -0,0 +1,19 @@
+{
+	"comment": "",
+	"ignore": "",
+	"package": [
+		{
+			"checksumSHA1": "Y2M44k2lygN97UGTFDyiQJLabDU=",
+			"path": "github.com/gocarina/gocsv",
+			"revision": "80ac68b8d188bc11f9bc83b372a83bc65d4e5cde",
+			"revisionTime": "2016-08-03T06:53:29Z"
+		},
+		{
+			"checksumSHA1": "Z0I4guD8AejM1hB3ltS/pTS60nQ=",
+			"path": "github.com/mitchellh/mapstructure",
+			"revision": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1",
+			"revisionTime": "2016-08-08T18:12:53Z"
+		}
+	],
+	"rootPath": "github.com/elastic/beats"
+}