diff --git a/.travis.yml b/.travis.yml index 419c1357ba3..4f7b7eca46a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -109,6 +109,12 @@ jobs: go: $GO_VERSION stage: test + # Journalbeat + - os: linux + env: TARGETS="-C journalbeat testsuite" + go: $GO_VERSION + stage: test + # Generators - os: linux env: TARGETS="-C generator/metricbeat test" @@ -160,6 +166,7 @@ addons: - libpcap-dev - xsltproc - libxml2-utils + - libsystemd-journal-dev before_install: - python --version diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 6c506436f67..32212cefc47 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -21,6 +21,8 @@ https://github.com/elastic/beats/compare/v6.4.0...master[Check the HEAD diff] *Heartbeat* +*Journalbeat* + *Metricbeat* *Packetbeat* @@ -73,6 +75,8 @@ https://github.com/elastic/beats/compare/v6.4.0...master[Check the HEAD diff] - Fixed bug where HTTP responses with larger bodies would incorrectly report connection errors. {pull}8660[8660] +*Journalbeat* + *Metricbeat* - Fix golang.heap.gc.cpu_fraction type from long to float in Golang module. {pull}7789[7789] @@ -148,6 +152,10 @@ https://github.com/elastic/beats/compare/v6.4.0...master[Check the HEAD diff] - Added support for extra TLS/x509 metadata. {pull}7944[7944] - Added stats and state metrics for number of monitors and endpoints started. {pull}8621[8621] +*Journalbeat* + +- Add journalbeat. {pull}8703[8703] + *Metricbeat* - Add metrics about cache size to memcached module {pull}7740[7740] @@ -193,6 +201,8 @@ https://github.com/elastic/beats/compare/v6.4.0...master[Check the HEAD diff] *Heartbeat* - watch.poll_file is now deprecated and superceded by automatic config file reloading. +*Journalbeat* + *Metricbeat* - Redis `info` `replication.master_offset` has been deprecated in favor of `replication.master.offset`.{pull}7695[7695] - Redis `info` clients fields `longest_output_list` and `biggest_input_buf` have been renamed to `max_output_buffer` and `max_input_buffer` based on the names they will have in Redis 5.0, both fields will coexist during a time with the same value {pull}8167[8167]. diff --git a/Makefile b/Makefile index 8f7c5c2654c..b9b5e274a26 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ BUILD_DIR=$(CURDIR)/build COVERAGE_DIR=$(BUILD_DIR)/coverage -BEATS?=auditbeat filebeat heartbeat metricbeat packetbeat winlogbeat x-pack/functionbeat +BEATS?=auditbeat filebeat heartbeat journalbeat metricbeat packetbeat winlogbeat x-pack/functionbeat PROJECTS=libbeat $(BEATS) PROJECTS_ENV=libbeat filebeat metricbeat PYTHON_ENV?=$(BUILD_DIR)/python-env diff --git a/NOTICE.txt b/NOTICE.txt index e6cabf11286..205cafbd367 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -231,6 +231,37 @@ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/coreos/go-systemd +Version: v17 +Revision: 39ca1b05acc7ad1220e09f133283b8859a8b71ab +License type (autodetected): Apache-2.0 +./vendor/github.com/coreos/go-systemd/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
+ +-------------------------------------------------------------------- +Dependency: github.com/coreos/pkg +Revision: 97fdf19511ea361ae1c100dd393cc47f8dcfa1e1 +License type (autodetected): Apache-2.0 +./vendor/github.com/coreos/pkg/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). + -------------------------------------------------------------------- Dependency: github.com/davecgh/go-spew Version: v1.1.0 diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index 3e06fba5a15..c75bb57ec57 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -48,6 +48,9 @@ func init() { // CrossBuildOption defines a option to the CrossBuild target. type CrossBuildOption func(params *crossBuildParams) +// ImageSelectorFunc returns the name of the builder image. +type ImageSelectorFunc func(platform string) (string, error) + // ForPlatforms filters the platforms based on the given expression. func ForPlatforms(expr string) func(params *crossBuildParams) { return func(params *crossBuildParams) { @@ -78,16 +81,24 @@ func Serially() func(params *crossBuildParams) { } } +// ImageSelector returns the name of the selected builder image. +func ImageSelector(f ImageSelectorFunc) func(params *crossBuildParams) { + return func(params *crossBuildParams) { + params.ImageSelector = f + } +} + type crossBuildParams struct { - Platforms BuildPlatformList - Target string - Serial bool - InDir string + Platforms BuildPlatformList + Target string + Serial bool + InDir string + ImageSelector ImageSelectorFunc } // CrossBuild executes a given build target once for each target platform. func CrossBuild(options ...CrossBuildOption) error { - params := crossBuildParams{Platforms: Platforms, Target: defaultCrossBuildTarget} + params := crossBuildParams{Platforms: Platforms, Target: defaultCrossBuildTarget, ImageSelector: crossBuildImage} for _, opt := range options { opt(¶ms) } @@ -111,10 +122,10 @@ func CrossBuild(options ...CrossBuildOption) error { if !buildPlatform.Flags.CanCrossBuild() { return fmt.Errorf("unsupported cross build platform %v", buildPlatform.Name) } - builder := GolangCrossBuilder{buildPlatform.Name, params.Target, params.InDir} + builder := GolangCrossBuilder{buildPlatform.Name, params.Target, params.InDir, params.ImageSelector} if params.Serial { if err := builder.Build(); err != nil { - return errors.Wrapf(err, "failed cross-building target=%v for platform=%v", + return errors.Wrapf(err, "failed cross-building target=%v for platform=%v %v", params.ImageSelector, params.Target, buildPlatform.Name) } } else { @@ -174,15 +185,16 @@ func crossBuildImage(platform string) (string, error) { return "", err } - return beatsCrossBuildImage + ":" + goVersion + "-" + tagSuffix, nil + return BeatsCrossBuildImage + ":" + goVersion + "-" + tagSuffix, nil } // GolangCrossBuilder executes the specified mage target inside of the // associated golang-crossbuild container image for the platform. type GolangCrossBuilder struct { - Platform string - Target string - InDir string + Platform string + Target string + InDir string + ImageSelector ImageSelectorFunc } // Build executes the build inside of Docker. 
@@ -208,7 +220,7 @@ func (b GolangCrossBuilder) Build() error { } dockerRun := sh.RunCmd("docker", "run") - image, err := crossBuildImage(b.Platform) + image, err := b.ImageSelector(b.Platform) if err != nil { return errors.Wrap(err, "failed to determine golang-crossbuild image tag") } diff --git a/dev-tools/mage/godaemon.go b/dev-tools/mage/godaemon.go index 0f4cb2c0ec1..c856fca6448 100644 --- a/dev-tools/mage/godaemon.go +++ b/dev-tools/mage/godaemon.go @@ -23,6 +23,13 @@ import ( "os" ) +var ( + defaultCrossBuildGoDaemon = []CrossBuildOption{ + ForPlatforms("linux"), + WithTarget("buildGoDaemon"), + } +) + // BuildGoDaemon builds the go-deamon binary. func BuildGoDaemon() error { if GOOS != "linux" { @@ -67,6 +74,7 @@ func BuildGoDaemon() error { // CrossBuildGoDaemon cross-build the go-daemon binary using the // golang-crossbuild environment. -func CrossBuildGoDaemon() error { - return CrossBuild(ForPlatforms("linux"), WithTarget("buildGoDaemon")) +func CrossBuildGoDaemon(options ...CrossBuildOption) error { + opts := append(defaultCrossBuildGoDaemon, options...) + return CrossBuild(opts...) } diff --git a/dev-tools/mage/settings.go b/dev-tools/mage/settings.go index c9229e58c9d..ffaa54991d4 100644 --- a/dev-tools/mage/settings.go +++ b/dev-tools/mage/settings.go @@ -39,8 +39,9 @@ const ( fpmVersion = "1.10.0" // Docker images. See https://github.com/elastic/golang-crossbuild. - beatsFPMImage = "docker.elastic.co/beats-dev/fpm" - beatsCrossBuildImage = "docker.elastic.co/beats-dev/golang-crossbuild" + beatsFPMImage = "docker.elastic.co/beats-dev/fpm" + // BeatsCrossBuildImage is the image used for crossbuilding Beats. + BeatsCrossBuildImage = "docker.elastic.co/beats-dev/golang-crossbuild" elasticBeatsImportPath = "github.com/elastic/beats" ) diff --git a/journalbeat/.gitignore b/journalbeat/.gitignore new file mode 100644 index 00000000000..504279e50ee --- /dev/null +++ b/journalbeat/.gitignore @@ -0,0 +1,9 @@ +/.idea +/build +.DS_Store +.journalbeat_position +/journalbeat +/journalbeat.test +*.pyc +data/meta.json +/*.journal diff --git a/journalbeat/Dockerfile b/journalbeat/Dockerfile new file mode 100644 index 00000000000..975fce8f382 --- /dev/null +++ b/journalbeat/Dockerfile @@ -0,0 +1,17 @@ +FROM golang:1.10.3 +MAINTAINER Noémi Ványi + +RUN set -x && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + python-pip virtualenv libsystemd-dev libc6-dev-i386 gcc-arm-linux-gnueabi && \ + apt-get clean + +RUN pip install --upgrade setuptools + +# Setup work environment +ENV JOURNALBEAT_PATH /go/src/github.com/elastic/beats/journalbeat + +RUN mkdir -p $JOURNALBEAT_PATH/build/coverage +WORKDIR $JOURNALBEAT_PATH +HEALTHCHECK CMD exit 0 diff --git a/journalbeat/Makefile b/journalbeat/Makefile new file mode 100644 index 00000000000..62bf3778d21 --- /dev/null +++ b/journalbeat/Makefile @@ -0,0 +1,15 @@ +BEAT_NAME=journalbeat +BEAT_TITLE=Journalbeat +SYSTEM_TESTS=false +TEST_ENVIRONMENT=false +ES_BEATS?=.. + +# Path to the libbeat Makefile +-include $(ES_BEATS)/libbeat/scripts/Makefile + +.PHONY: before-build +before-build: + +# Collects all dependencies and then calls update +.PHONY: collect +collect: diff --git a/journalbeat/README.md b/journalbeat/README.md new file mode 100644 index 00000000000..69fccb38f90 --- /dev/null +++ b/journalbeat/README.md @@ -0,0 +1,5 @@ +# Journalbeat + +Journalbeat is an open source data collector that reads and forwards journal entries from Linux systems running systemd.
+ +## Getting started diff --git a/journalbeat/_meta/beat.yml b/journalbeat/_meta/beat.yml new file mode 100644 index 00000000000..597f1ada844 --- /dev/null +++ b/journalbeat/_meta/beat.yml @@ -0,0 +1,56 @@ +###################### Journalbeat Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. The journalbeat.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/journalbeat/index.html + +# For more available modules and options, please see the journalbeat.reference.yml sample +# configuration file. + +#=========================== Journalbeat inputs ============================= + +journalbeat.inputs: + # Paths that should be crawled and fetched. Possible values are files and directories. + # When setting a directory, all journals under it are merged. + # When empty, Journalbeat reads from the local journal. +- paths: [] + + # The number of seconds to wait before trying to read again from journals. + #backoff: 1s + # The maximum number of seconds to wait before attempting to read again from journals. + #max_backoff: 60s + + # Position to start reading from the journal. Valid values: head, tail, cursor + seek: tail + + # Exact matching for field values of events. + # Matching for nginx entries: "systemd.unit=nginx" + #include_matches: [] + + # Optional fields that you can specify to add additional information to the + # output. Fields can be scalar values, arrays, dictionaries, or any nested + # combination of these. + #fields: + # env: staging + + +#========================= Journalbeat global options ============================ +#journalbeat: + # Name of the registry file. If a relative path is used, it is considered relative to the + # data path. + #registry_file: registry + + # The number of seconds to wait before trying to read again from journals. + #backoff: 1s + # The maximum number of seconds to wait before attempting to read again from journals. + #max_backoff: 60s + + # Position to start reading from all journals. Possible values: head, tail, cursor + #seek: head + + # Exact matching for field values of events. + # Matching for nginx entries: "systemd.unit=nginx" + #include_matches: [] diff --git a/journalbeat/_meta/fields.common.yml b/journalbeat/_meta/fields.common.yml new file mode 100644 index 00000000000..7f0cedd4875 --- /dev/null +++ b/journalbeat/_meta/fields.common.yml @@ -0,0 +1,321 @@ +- key: common + title: "Common Journalbeat" + description: > + Contains common fields available in all event types. + fields: + - name: read_timestamp + description: > + The time when Journalbeat read the journal entry. + - name: coredump + type: group + description: > + Fields used by the systemd-coredump kernel helper. + fields: + - name: unit + type: keyword + description: > + Annotations of messages containing coredumps from system units. + - name: user_unit + type: keyword + description: > + Annotations of messages containing coredumps from user units. + - name: journald + type: group + description: > + Fields provided by journald. + fields: + - name: object + type: group + description: > + Fields logged on behalf of a different program. + fields: + - name: audit + type: group + description: > + Audit fields of the event. + fields: + - name: login_uid + type: long + required: false + example: 1000 + description: > + The login UID of the object process.
- name: session + type: long + required: false + example: 3 + description: > + The audit session of the object process. + - name: cmd + type: keyword + required: false + example: "/lib/systemd/systemd --user" + description: > + The command line of the process. + - name: name + type: keyword + required: false + example: "/lib/systemd/systemd" + description: > + Name of the executable. + - name: executable + type: keyword + required: false + description: > + Path to the executable. + example: "/lib/systemd/systemd" + - name: uid + type: long + required: false + description: > + UID of the object process. + - name: gid + type: long + required: false + description: > + GID of the object process. + - name: pid + type: long + required: false + description: > + PID of the object process. + - name: systemd + type: group + description: > + Systemd fields of the event. + fields: + - name: owner_uid + type: long + required: false + description: > + The UID of the owner. + - name: session + type: keyword + required: false + description: > + The ID of the systemd session. + - name: unit + type: keyword + required: false + description: > + The name of the systemd unit. + - name: user_unit + type: keyword + required: false + description: > + The name of the systemd user unit. + - name: kernel + type: group + description: > + Fields of kernel messages. + fields: + - name: device + type: keyword + required: false + description: > + The kernel device name. + - name: subsystem + type: keyword + required: false + description: > + The kernel subsystem name. + - name: device_symlinks + type: text + required: false + description: > + Additional symlink names pointing to the device node in /dev. + - name: device_node_path + type: text + required: false + description: > + The device node path of this device in /dev. + - name: device_name + type: text + required: false + description: > + The kernel device name as it shows up in the device tree below /sys. + - name: code + type: group + description: > + Fields of the code generating the event. + fields: + - name: file + type: text + required: false + example: "../src/core/manager.c" + description: > + The name of the source file where the log is generated. + - name: function + type: text + required: false + example: "job_log_status_message" + description: > + The name of the function which generated the log message. + - name: line + type: long + required: false + example: 123 + description: > + The line number of the code which generated the log message. + - name: process + type: group + description: > + Fields of the process that logged the message. + fields: + - name: audit + type: group + description: > + Audit fields of the event. + fields: + - name: loginuid + type: long + required: false + example: 1000 + description: > + The login UID of the source process. + - name: session + type: long + required: false + example: 3 + description: > + The audit session of the source process. + - name: cmd + type: keyword + required: false + example: "/lib/systemd/systemd --user" + description: > + The command line of the process. + - name: name + type: keyword + required: false + example: "/lib/systemd/systemd" + description: > + Name of the executable. + - name: executable + type: keyword + required: false + description: > + Path to the executable. + example: "/lib/systemd/systemd" + - name: pid + type: long + required: false + example: 1 + description: > + The ID of the process which logged the message.
- name: gid + type: long + required: false + example: 1 + description: > + The ID of the group which runs the process. + - name: uid + type: long + required: false + example: 1 + description: > + The ID of the user which runs the process. + - name: capabilities + required: false + description: > + The effective capabilities of the process. + - name: systemd + type: group + description: > + Fields of systemd. + fields: + - name: invocation_id + type: keyword + required: false + example: "8450f1672de646c88cd133aadd4f2d70" + description: > + The invocation ID for the runtime cycle of the unit the message was generated in. + - name: cgroup + type: keyword + required: false + example: "/user.slice/user-1234.slice/session-2.scope" + description: > + The control group path in the systemd hierarchy. + - name: owner_uid + type: long + required: false + description: > + The owner UID of the systemd user unit or systemd session. + - name: session + type: keyword + required: false + description: > + The ID of the systemd session. + - name: slice + type: keyword + required: false + example: "user-1234.slice" + description: > + The systemd slice unit. + - name: user_slice + type: keyword + required: false + description: > + The systemd user slice unit. + - name: unit + type: keyword + required: false + example: "nginx.service" + description: > + The name of the systemd unit. + - name: user_unit + type: keyword + required: false + example: "user-1234.slice" + description: > + The name of the systemd user unit. + - name: transport + type: keyword + required: true + example: "syslog" + description: > + How the log message was received by journald. + - name: host + type: group + description: > + Fields of the host. + fields: + - name: boot_id + type: text + required: false + example: "dd8c974asdf01dbe2ef26d7fasdf264c9" + description: > + The boot ID for the boot the log was generated in. + - name: syslog + type: group + description: > + Syslog compatibility fields of the event. + fields: + - name: priority + type: long + required: false + example: 1 + description: > + The priority of the message. A syslog compatibility field. + - name: facility + type: long + required: false + example: 1 + description: > + The facility of the message. A syslog compatibility field. + - name: identifier + type: text + required: false + example: "su" + description: > + The identifier of the message. A syslog compatibility field. + - name: message + type: text + required: true + description: > + The logged message. + - name: custom + type: nested + required: false + description: > + Arbitrary fields coming from processes.
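The field reference above is easier to follow with a concrete translation step. The Go sketch below is illustrative only — the translation code shipped by this PR is not part of this hunk, and the `journaldToBeat` table and `toEvent` helper are hypothetical names — but it shows how raw journald entry keys such as `MESSAGE` or `SYSLOG_PRIORITY` could be renamed to the documented `message` and `syslog.priority` fields using libbeat's `common.MapStr`:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/elastic/beats/libbeat/common"
)

// journaldToBeat is a hypothetical lookup table mapping raw journald entry
// keys to the dotted field names documented in fields.common.yml.
var journaldToBeat = map[string]string{
	"MESSAGE":           "message",
	"SYSLOG_IDENTIFIER": "syslog.identifier",
	"SYSLOG_PRIORITY":   "syslog.priority",
	"SYSLOG_FACILITY":   "syslog.facility",
}

// toEvent converts the raw key/value pairs of one journal entry into a
// nested event, dropping keys it does not recognize.
func toEvent(raw map[string]string) common.MapStr {
	fields := common.MapStr{}
	for key, value := range raw {
		name, ok := journaldToBeat[key]
		if !ok {
			continue
		}
		// syslog.priority and syslog.facility are documented as type long,
		// so parse them into integers before putting them into the event.
		if name == "syslog.priority" || name == "syslog.facility" {
			if n, err := strconv.Atoi(value); err == nil {
				fields.Put(name, n)
			}
			continue
		}
		fields.Put(name, value)
	}
	return fields
}

func main() {
	entry := map[string]string{
		"MESSAGE":           "Started Nginx.",
		"SYSLOG_IDENTIFIER": "systemd",
		"SYSLOG_PRIORITY":   "6",
	}
	fmt.Println(toEvent(entry).StringToPrint())
}
```

`common.MapStr.Put` expands the dotted names into the nested structure that the index pattern below expects.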
diff --git a/journalbeat/_meta/kibana/5.x/index-pattern/journalbeat.json b/journalbeat/_meta/kibana/5.x/index-pattern/journalbeat.json new file mode 100644 index 00000000000..251aa05c6db --- /dev/null +++ b/journalbeat/_meta/kibana/5.x/index-pattern/journalbeat.json @@ -0,0 +1,6 @@ +{ + "fields": "[{\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.hostname\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.timezone\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.version\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"@timestamp\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"date\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"tags\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"fields\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"error.message\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"error.code\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"number\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"error.type\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.provider\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.instance_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.instance_name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.machine_type\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.availability_zone\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.project_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.region\", \"searchable\": 
true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.image\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.labels\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.pod.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.namespace\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.labels\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.annotations\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.container.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.container.image\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"counter\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"number\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_id\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"_type\", \"searchable\": true, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_index\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_score\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"number\", \"scripted\": false}]", + "fieldFormatMap": "{\"@timestamp\": {\"id\": \"date\"}}", + "timeFieldName": "@timestamp", + "title": "journalbeat-*" +} \ No newline at end of file diff --git a/journalbeat/_meta/kibana/default/index-pattern/journalbeat.json b/journalbeat/_meta/kibana/default/index-pattern/journalbeat.json new file mode 100644 index 00000000000..55e70bd81d1 --- /dev/null +++ b/journalbeat/_meta/kibana/default/index-pattern/journalbeat.json @@ -0,0 +1,16 @@ +{ + "version": "7.0.0-alpha1", + "objects": [ + { + "attributes": { + "fields": 
"[{\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.hostname\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.timezone\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"beat.version\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"@timestamp\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"date\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"tags\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"fields\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"error.message\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"error.code\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"number\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"error.type\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.provider\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.instance_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.instance_name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.machine_type\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.availability_zone\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.project_id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"meta.cloud.region\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.id\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, 
{\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.image\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"docker.container.labels\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.pod.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.namespace\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.labels\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.annotations\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.container.name\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"kubernetes.container.image\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"counter\", \"searchable\": true, \"indexed\": true, \"doc_values\": true, \"type\": \"number\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_id\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": true, \"name\": \"_type\", \"searchable\": true, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_index\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"string\", \"scripted\": false}, {\"count\": 0, \"analyzed\": false, \"aggregatable\": false, \"name\": \"_score\", \"searchable\": false, \"indexed\": false, \"doc_values\": false, \"type\": \"number\", \"scripted\": false}]", + "fieldFormatMap": "{\"@timestamp\": {\"id\": \"date\"}}", + "timeFieldName": "@timestamp", + "title": "journalbeat-*" + }, + "version": 1, + "type": "index-pattern", + "id": "journalbeat-*" + } + ] +} \ No newline at end of file diff --git a/journalbeat/beater/journalbeat.go b/journalbeat/beater/journalbeat.go new file mode 100644 index 00000000000..d6f47b61334 --- /dev/null +++ b/journalbeat/beater/journalbeat.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package beater + +import ( + "fmt" + "sync" + "time" + + "github.com/elastic/beats/journalbeat/checkpoint" + "github.com/elastic/beats/journalbeat/cmd/instance" + "github.com/elastic/beats/journalbeat/input" + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/cfgwarn" + "github.com/elastic/beats/libbeat/logp" + + "github.com/elastic/beats/journalbeat/config" +) + +// Journalbeat instance +type Journalbeat struct { + inputs []*input.Input + done chan struct{} + config config.Config + + pipeline beat.Pipeline + checkpoint *checkpoint.Checkpoint + logger *logp.Logger +} + +// New returns a new Journalbeat instance +func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) { + cfgwarn.Experimental("Journalbeat is experimental.") + + config := config.DefaultConfig + if err := cfg.Unpack(&config); err != nil { + return nil, fmt.Errorf("error reading config file: %v", err) + } + + done := make(chan struct{}) + cp, err := checkpoint.NewCheckpoint(config.RegistryFile, 10, 1*time.Second) + if err != nil { + return nil, err + } + + instance.SetupJournalMetrics() + + var inputs []*input.Input + for _, c := range config.Inputs { + i, err := input.New(c, b.Publisher, done, cp.States()) + if err != nil { + return nil, err + } + inputs = append(inputs, i) + } + + bt := &Journalbeat{ + inputs: inputs, + done: done, + config: config, + pipeline: b.Publisher, + checkpoint: cp, + logger: logp.NewLogger("journalbeat"), + } + + return bt, nil +} + +// Run sets up the ACK handler and starts inputs to read and forward events to outputs. +func (bt *Journalbeat) Run(b *beat.Beat) error { + bt.logger.Info("journalbeat is running! Hit CTRL-C to stop it.") + defer bt.logger.Info("journalbeat is stopping") + + err := bt.pipeline.SetACKHandler(beat.PipelineACKHandler{ + ACKLastEvents: func(data []interface{}) { + for _, datum := range data { + if st, ok := datum.(checkpoint.JournalState); ok { + bt.checkpoint.PersistState(st) + } + } + }, + }) + if err != nil { + return err + } + defer bt.checkpoint.Shutdown() + + var wg sync.WaitGroup + for _, i := range bt.inputs { + wg.Add(1) + go bt.runInput(i, &wg) + } + + wg.Wait() + + return nil +} + +func (bt *Journalbeat) runInput(i *input.Input, wg *sync.WaitGroup) { + defer wg.Done() + i.Run() +} + +// Stop stops the beat and its inputs. +func (bt *Journalbeat) Stop() { + close(bt.done) + for _, i := range bt.inputs { + i.Stop() + } +} diff --git a/journalbeat/checkpoint/checkpoint.go b/journalbeat/checkpoint/checkpoint.go new file mode 100644 index 00000000000..f2c3bfacdab --- /dev/null +++ b/journalbeat/checkpoint/checkpoint.go @@ -0,0 +1,290 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package checkpoint persists event log state information to disk so that +// event log monitoring can resume from the last read event in the case of a +// restart or unexpected interruption. +package checkpoint + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/libbeat/logp" +) + +// Checkpoint persists event log state information to disk. +type Checkpoint struct { + wg sync.WaitGroup // WaitGroup used to wait on the shutdown of the checkpoint worker. + done chan struct{} // Channel for shutting down the checkpoint worker. + once sync.Once // Used to guarantee shutdown happens once. + file string // File where the state is persisted. + fileLock sync.RWMutex // Lock that protects concurrent reads/writes to file. + numUpdates int // Number of updates received since last persisting to disk. + maxUpdates int // Maximum number of updates to buffer before persisting to disk. + flushInterval time.Duration // Maximum time interval that can pass before persisting to disk. + sort []string // Slice used for sorting states map (reused to save on allocations). + + lock sync.RWMutex + states map[string]JournalState + + save chan JournalState +} + +// PersistedState represents the format of the data persisted to disk. +type PersistedState struct { + UpdateTime time.Time `yaml:"update_time"` + States []JournalState `yaml:"journal_entries"` +} + +// JournalState represents the state of an individual journal. +type JournalState struct { + Path string `yaml:"path"` + Cursor string `yaml:"cursor"` + RealtimeTimestamp uint64 `yaml:"realtime_timestamp"` + MonotonicTimestamp uint64 `yaml:"monotonic_timestamp"` +} + +// NewCheckpoint creates and returns a new Checkpoint. This method loads state +// information from disk if it exists and starts a goroutine for persisting +// state information to disk. Shutdown should be called when finished to +// guarantee any in-memory state information is flushed to disk. +// +// file is the name of the file where event log state is persisted as YAML. +// maxUpdates is the maximum number of updates checkpoint will accept before +// triggering a flush to disk. interval is the maximum amount of time that can +// pass since the last flush before triggering a flush to disk (minimum value +// is 1s). +func NewCheckpoint(file string, maxUpdates int, interval time.Duration) (*Checkpoint, error) { + c := &Checkpoint{ + done: make(chan struct{}), + file: file, + maxUpdates: maxUpdates, + flushInterval: interval, + sort: make([]string, 0, 10), + states: make(map[string]JournalState), + save: make(chan JournalState, 1), + } + + // Minimum batch size. + if c.maxUpdates < 1 { + c.maxUpdates = 1 + } + + // Minimum flush interval. + if c.flushInterval < time.Second { + c.flushInterval = time.Second + } + + // Read existing state information: + ps, err := c.read() + if err != nil { + return nil, err + } + + if ps != nil { + for _, state := range ps.States { + c.states[state.Path] = state + } + } + + // Write the state file to verify we have permissions.
+ err = c.flush() + if err != nil { + return nil, err + } + + c.wg.Add(1) + go c.run() + return c, nil +} + +// run is the worker loop that reads incoming state information from the save +// channel and persists it when the number of changes reaches maxUpdates or +// the amount of time since the last disk write reaches flushInterval. +func (c *Checkpoint) run() { + defer c.wg.Done() + defer c.persist() + + flushTimer := time.NewTimer(c.flushInterval) + defer flushTimer.Stop() +loop: + for { + select { + case <-c.done: + break loop + case s := <-c.save: + c.lock.Lock() + c.states[s.Path] = s + c.lock.Unlock() + c.numUpdates++ + if c.numUpdates < c.maxUpdates { + continue + } + case <-flushTimer.C: + } + + c.persist() + flushTimer.Reset(c.flushInterval) + } +} + +// Shutdown stops the checkpoint worker (which persists any state to disk as +// it stops). This method blocks until the checkpoint worker shuts down. Calling +// this method more than once is safe and has no effect. +func (c *Checkpoint) Shutdown() { + c.once.Do(func() { + close(c.done) + c.wg.Wait() + }) +} + +// States returns the current in-memory event log state. This state information +// is bootstrapped with any data found on disk at creation time. +func (c *Checkpoint) States() map[string]JournalState { + c.lock.RLock() + defer c.lock.RUnlock() + + copy := make(map[string]JournalState) + for k, v := range c.states { + copy[k] = v + } + + return copy +} + +// Persist queues the given event log state information to be written to disk. +func (c *Checkpoint) Persist(path, cursor string, realTs, monotonicTs uint64) { + c.PersistState(JournalState{ + Path: path, + Cursor: cursor, + RealtimeTimestamp: realTs, + MonotonicTimestamp: monotonicTs, + }) +} + +// PersistState queues the given event log state to be written to disk. +func (c *Checkpoint) PersistState(st JournalState) { + c.save <- st +} + +// persist writes the current state to disk if the in-memory state is dirty. +func (c *Checkpoint) persist() bool { + if c.numUpdates == 0 { + return false + } + + err := c.flush() + if err != nil { + return false + } + + logp.Debug("checkpoint", "Checkpoint saved to disk. numUpdates=%d", + c.numUpdates) + c.numUpdates = 0 + return true +} + +// flush writes the current state to disk. +func (c *Checkpoint) flush() error { + c.fileLock.Lock() + defer c.fileLock.Unlock() + + tempFile := c.file + ".new" + file, err := create(tempFile) + if os.IsNotExist(err) { + // Try to create directory if it does not exist. + if createDirErr := c.createDir(); createDirErr == nil { + file, err = create(tempFile) + } + } + + if err != nil { + return fmt.Errorf("Failed to flush state to disk. %v", err) + } + + // Sort persisted states by path. + c.sort = c.sort[:0] + for k := range c.states { + c.sort = append(c.sort, k) + } + sort.Strings(c.sort) + + ps := PersistedState{ + UpdateTime: time.Now().UTC(), + States: make([]JournalState, len(c.sort)), + } + for i, name := range c.sort { + ps.States[i] = c.states[name] + } + + data, err := yaml.Marshal(ps) + if err != nil { + file.Close() + return fmt.Errorf("Failed to flush state to disk. Could not marshal "+ + "data to YAML. %v", err) + } + + _, err = file.Write(data) + if err != nil { + file.Close() + return fmt.Errorf("Failed to flush state to disk. Could not write to "+ + "%s. %v", tempFile, err) + } + + file.Close() + err = os.Rename(tempFile, c.file) + return err +} + +// read loads the persisted state from disk. If the file does not exist, then +// the method returns nil and no error.
+func (c *Checkpoint) read() (*PersistedState, error) { + c.fileLock.RLock() + defer c.fileLock.RUnlock() + + contents, err := ioutil.ReadFile(c.file) + if err != nil { + if os.IsNotExist(err) { + err = nil + } + return nil, err + } + + ps := &PersistedState{} + err = yaml.Unmarshal(contents, ps) + if err != nil { + return nil, err + } + + return ps, nil +} + +// createDir creates the directory in which the state file will reside if the +// directory does not already exist. +func (c *Checkpoint) createDir() error { + dir := filepath.Dir(c.file) + logp.Info("Creating %s if it does not exist.", dir) + return os.MkdirAll(dir, os.FileMode(0750)) +} diff --git a/journalbeat/checkpoint/checkpoint_test.go b/journalbeat/checkpoint/checkpoint_test.go new file mode 100644 index 00000000000..a5edeb83ead --- /dev/null +++ b/journalbeat/checkpoint/checkpoint_test.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !integration + +package checkpoint + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func eventually(t *testing.T, predicate func() (bool, error), timeout time.Duration) { + const minInterval = time.Millisecond * 5 + const maxInterval = time.Millisecond * 500 + + checkInterval := timeout / 100 + if checkInterval < minInterval { + checkInterval = minInterval + } + if checkInterval > maxInterval { + checkInterval = maxInterval + } + for deadline, first := time.Now().Add(timeout), true; first || time.Now().Before(deadline); first = false { + ok, err := predicate() + if err != nil { + t.Fatal("predicate failed with error:", err) + return + } + if ok { + return + } + time.Sleep(checkInterval) + } + t.Fatal("predicate is not true after", timeout) +} + +// Test that a write is triggered when the maximum number of updates is reached. +func TestWriteMaxUpdates(t *testing.T) { + dir, err := ioutil.TempDir("", "wlb-checkpoint-test") + if err != nil { + t.Fatal(err) + } + defer func() { + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } + }() + + file := filepath.Join(dir, "some", "new", "dir", ".winlogbeat.yml") + if !assert.False(t, fileExists(file), "%s should not exist", file) { + return + } + + cp, err := NewCheckpoint(file, 2, time.Hour) + if err != nil { + t.Fatal(err) + } + defer cp.Shutdown() + + // Send update - it's not written to disk but it's in memory. + cp.Persist("", "", 123456, 123456) + found := false + eventually(t, func() (bool, error) { + _, found = cp.States()[""] + return found, nil + }, time.Second*15) + assert.True(t, found) + + ps, err := cp.read() + if err != nil { + t.Fatal("read failed", err) + } + assert.Len(t, ps.States, 0) + + // Send update - it is written to disk. 
+ cp.Persist("", "", 123456, 123456) + eventually(t, func() (bool, error) { + ps, err = cp.read() + return ps != nil && len(ps.States) > 0, err + }, time.Second*15) + + if assert.Len(t, ps.States, 1, "state not written, could be a flush timing issue, retry") { + assert.Equal(t, "", ps.States[0].Path) + assert.Equal(t, "", ps.States[0].Cursor) + } +} + +// Test that a write is triggered when the maximum time period since the last +// write is reached. +func TestWriteTimedFlush(t *testing.T) { + dir, err := ioutil.TempDir("", "wlb-checkpoint-test") + if err != nil { + t.Fatal(err) + } + defer func() { + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } + }() + + file := filepath.Join(dir, ".winlogbeat.yml") + if !assert.False(t, fileExists(file), "%s should not exist", file) { + return + } + + cp, err := NewCheckpoint(file, 100, time.Second) + if err != nil { + t.Fatal(err) + } + defer cp.Shutdown() + + // Send update then wait longer than the flush interval and it should be + // on disk. + cp.Persist("", "cursor", 123456, 123456) + eventually(t, func() (bool, error) { + ps, err := cp.read() + return ps != nil && len(ps.States) > 0, err + }, time.Second*15) + + ps, err := cp.read() + if err != nil { + t.Fatal("read failed", err) + } + if assert.Len(t, ps.States, 1) { + assert.Equal(t, "", ps.States[0].Path) + assert.Equal(t, "cursor", ps.States[0].Cursor) + } +} + +// Test that createDir creates the directory with 0750 permissions. +func TestCreateDir(t *testing.T) { + dir, err := ioutil.TempDir("", "wlb-checkpoint-test") + if err != nil { + t.Fatal(err) + } + defer func() { + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } + }() + + stateDir := filepath.Join(dir, "state", "dir", "does", "not", "exists") + file := filepath.Join(stateDir, ".winlogbeat.yml") + cp := &Checkpoint{file: file} + + if !assert.False(t, fileExists(file), "%s should not exist", file) { + return + } + if err = cp.createDir(); err != nil { + t.Fatal("createDir", err) + } + if !assert.True(t, fileExists(stateDir), "%s should exist", file) { + return + } + + // mkdir on Windows does not pass the POSIX mode to the CreateDirectory + // syscall so doesn't test the mode. + if runtime.GOOS != "windows" { + fileInfo, err := os.Stat(stateDir) + if assert.NoError(t, err) { + assert.Equal(t, true, fileInfo.IsDir()) + assert.Equal(t, os.FileMode(0750), fileInfo.Mode().Perm()) + } + } +} + +// Test createDir when the directory already exists to verify that no error is +// returned. +func TestCreateDirAlreadyExists(t *testing.T) { + dir, err := ioutil.TempDir("", "wlb-checkpoint-test") + if err != nil { + t.Fatal(err) + } + defer func() { + err := os.RemoveAll(dir) + if err != nil { + t.Fatal(err) + } + }() + + file := filepath.Join(dir, ".winlogbeat.yml") + cp := &Checkpoint{file: file} + + if !assert.True(t, fileExists(dir), "%s should exist", file) { + return + } + assert.NoError(t, cp.createDir()) +} + +// fileExists returns true if the specified file exists. +func fileExists(file string) bool { + _, err := os.Stat(file) + return !os.IsNotExist(err) +} diff --git a/journalbeat/checkpoint/file_unix.go b/journalbeat/checkpoint/file_unix.go new file mode 100644 index 00000000000..ee3201d6100 --- /dev/null +++ b/journalbeat/checkpoint/file_unix.go @@ -0,0 +1,26 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !windows + +package checkpoint + +import "os" + +func create(path string) (*os.File, error) { + return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0600) +} diff --git a/journalbeat/checkpoint/file_windows.go b/journalbeat/checkpoint/file_windows.go new file mode 100644 index 00000000000..6644d751096 --- /dev/null +++ b/journalbeat/checkpoint/file_windows.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package checkpoint + +import ( + "os" + "syscall" +) + +const ( + _FILE_FLAG_WRITE_THROUGH = 0x80000000 +) + +func create(path string) (*os.File, error) { + return createWriteThroughFile(path) +} + +// createWriteThroughFile creates a file whose write operations do not go +// through any intermediary cache, they go directly to disk. +func createWriteThroughFile(path string) (*os.File, error) { + if len(path) == 0 { + return nil, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return nil, err + } + + h, err := syscall.CreateFile( + pathp, // Path + syscall.GENERIC_READ|syscall.GENERIC_WRITE, // Access Mode + uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), // Share Mode + nil, // Security Attributes + syscall.CREATE_ALWAYS, // Create Mode + uint32(syscall.FILE_ATTRIBUTE_NORMAL|_FILE_FLAG_WRITE_THROUGH), // Flags and Attributes + 0) // Template File + + return os.NewFile(uintptr(h), path), err +} diff --git a/journalbeat/cmd/instance/metrics.go b/journalbeat/cmd/instance/metrics.go new file mode 100644 index 00000000000..dee1d6eaeb9 --- /dev/null +++ b/journalbeat/cmd/instance/metrics.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package instance + +import ( + "fmt" + + "github.com/coreos/go-systemd/sdjournal" + + "github.com/elastic/beats/libbeat/monitoring" +) + +var ( + metrics *monitoring.Registry + journals map[string]*sdjournal.Journal +) + +// SetupJournalMetrics initializes and registers monitoring functions. +func SetupJournalMetrics() { + metrics = monitoring.Default.NewRegistry("journalbeat") + journals = make(map[string]*sdjournal.Journal) + + monitoring.NewFunc(metrics, "journals", reportJournalSizes, monitoring.Report) +} + +// AddJournalToMonitor adds a new journal which has to be monitored. +func AddJournalToMonitor(path string, journal *sdjournal.Journal) { + journals[path] = journal +} + +// StopMonitoringJournal stops monitoring the journal under the path. +func StopMonitoringJournal(path string) { + delete(journals, path) +} + +func reportJournalSizes(m monitoring.Mode, V monitoring.Visitor) { + i := 0 + for path, journal := range journals { + s, err := journal.GetUsage() + if err != nil { + continue + } + + ns := fmt.Sprintf("journal_%d", i) + monitoring.ReportNamespace(V, ns, func() { + monitoring.ReportString(V, "path", path) + monitoring.ReportInt(V, "size_in_bytes", int64(s)) + }) + i++ + } +} diff --git a/journalbeat/cmd/root.go b/journalbeat/cmd/root.go new file mode 100644 index 00000000000..d1afa4fdfcc --- /dev/null +++ b/journalbeat/cmd/root.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package cmd + +import ( + "github.com/elastic/beats/journalbeat/beater" + + cmd "github.com/elastic/beats/libbeat/cmd" +) + +// Name of this beat +var Name = "journalbeat" + +// RootCmd to handle beats cli +var RootCmd = cmd.GenRootCmd(Name, "", beater.New) diff --git a/journalbeat/config/config.go b/journalbeat/config/config.go new file mode 100644 index 00000000000..bcbed93df37 --- /dev/null +++ b/journalbeat/config/config.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Config is put into a different package to prevent cyclic imports in case +// it is needed in several locations + +package config + +import ( + "time" + + "github.com/elastic/beats/libbeat/common" +) + +// Config stores the configuration of Journalbeat +type Config struct { + Inputs []*common.Config `config:"inputs"` + RegistryFile string `config:"registry_file"` + Backoff time.Duration `config:"backoff" validate:"min=0,nonzero"` + MaxBackoff time.Duration `config:"max_backoff" validate:"min=0,nonzero"` + Seek string `config:"seek"` + Matches []string `config:"include_matches"` +} + +// DefaultConfig are the defaults of a Journalbeat instance +var DefaultConfig = Config{ + RegistryFile: "registry", + Backoff: 1 * time.Second, + MaxBackoff: 60 * time.Second, + Seek: "tail", +} diff --git a/journalbeat/config/config_test.go b/journalbeat/config/config_test.go new file mode 100644 index 00000000000..f4373cc528b --- /dev/null +++ b/journalbeat/config/config_test.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !integration + +package config diff --git a/journalbeat/docs/config-options.asciidoc b/journalbeat/docs/config-options.asciidoc new file mode 100644 index 00000000000..722d9b9534a --- /dev/null +++ b/journalbeat/docs/config-options.asciidoc @@ -0,0 +1,102 @@ +[id="configuration-{beatname_lc}-options"] +== Configure inputs + +++++ +Configure inputs +++++ + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +By default, {beatname_uc} reads log events from the default systemd journals. To +specify other journal files, set the <<{beatname_lc}-paths,`paths`>> option in +the +{beatname_lc}.inputs+ section of the +{beatname_lc}.yml+ file. + +The list of paths is a YAML array, so each path begins with a dash (-). Each +path can be a directory path (to collect events from all journals in a +directory), or a file path. For example: + +["source","sh",subs="attributes"] +---- +{beatname_lc}.inputs: +- paths: + - "/dev/log" + - "/var/log/messages/my-journal-file" +---- + +Within the +{beatname_lc}.inputs+ section, you can also specify options that +control the position where {beatname_uc} starts reading the journal file, and +set filters to reduce the fields that {beatname_uc} needs to process. See +<<{beatname_lc}-options>> for a list of available options. 
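+
+For orientation, the following sketch spells out the remaining options with
+their default values (`registry_file: registry`, `backoff: 1s`,
+`max_backoff: 60s`, `seek: tail`). It assumes these options sit at the top
+level of the +{beatname_lc}+ namespace, mirroring the beat's Go `Config`
+struct, so treat it as illustrative rather than canonical:
+
+["source","sh",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- paths: []
+{beatname_lc}.registry_file: registry
+{beatname_lc}.backoff: 1s
+{beatname_lc}.max_backoff: 60s
+{beatname_lc}.seek: tail
+----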
+ +[float] +=== Configuration examples + +The following example shows how to monitor multiple journals under the +same directory. {beatname_uc} merges all journals under the directory into a +single journal and reads them. With `seek` set to `cursor`, {beatname_uc} +starts reading at the beginning of the journal, but will continue reading where +it left off after a reload or restart. + +["source","sh",subs="attributes"] +---- +{beatname_lc}.inputs: +- paths: ["/path/to/journal/directory"] + seek: cursor +---- + +The following examples show how to get Redis events from a Docker container that +is tagged as `redis`. + +//TODO: Add a better explanation of the options. + +This example uses the translated fields by Journald: + +["source","sh",subs="attributes"] +---- +{beatname_lc}.inputs: +- paths: [] + include_matches: + - "container.image.tag=redis" + - "process.name=redis" +---- + +This example uses the field names from the systemd journal: + +["source","sh",subs="attributes"] +---- +{beatname_lc}.inputs: +- paths: [] + include_matches: + - "CONTAINER_TAG=redis" + - "_COMM=redis" +---- + +[id="{beatname_lc}-options"] +[float] +=== Configuration options +You can specify the following options to configure how {beatname_uc} reads the +journal files. + +[float] +[id="{beatname_lc}-paths"] +==== `paths` + +A list of paths that will be crawled and fetched. Each path can be a directory +path (to collect events from all journals in a directory), or a file path. If +you specify a directory, {beatname_uc} merges all journals under the directory +into a single journal and reads them. + +//QUESTION: Are globs supported? If so, I need to add more detail here. + +[float] +[id="{beatname_lc}-seek"] +==== `seek` + +The position to start reading the journal from. Valid settings are: + +* `head`: Starts reading at the beginning of the file. +* `tail`: Starts reading at the end of the file. +* `cursor`: Initially starts reading at the beginning of the file, but continues +reading where it left off after a reload or restart. + +//TODO: ADD OTHER OPTIONS HERE. \ No newline at end of file diff --git a/journalbeat/docs/configuring-howto.asciidoc b/journalbeat/docs/configuring-howto.asciidoc new file mode 100644 index 00000000000..95d56d427f2 --- /dev/null +++ b/journalbeat/docs/configuring-howto.asciidoc @@ -0,0 +1,90 @@ +[id="configuring-howto-{beatname_lc}"] += Configuring {beatname_uc} + +[partintro] +-- + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +Before modifying configuration settings, make sure you've completed the +<<{beatname_lc}-configuration,configuration steps>> in the Getting Started. +This section describes some common use cases for changing configuration options. + +To configure {beatname_uc}, you edit the configuration file. For rpm and deb, +you’ll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+. +There's also a full example configuration file at ++/etc/{beatname_lc}/{beatname_lc}.reference.yml+ that shows all non-deprecated +options. For mac and win, look in the archive that you extracted. + +The {beatname_uc} configuration file uses http://yaml.org/[YAML] for its syntax. +See the {libbeat}/config-file-format.html[Config File Format] section of the +_{libbeat_docs}_ for more about the structure of the config file. 
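+
+As a minimal sketch of that structure, a working +{beatname_lc}.yml+ combines
+an inputs section with a single configured output (the journal path and
+Elasticsearch host shown here are placeholders):
+
+["source","sh",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- paths: ["/var/log/journal"]
+  seek: cursor
+
+output.elasticsearch:
+  hosts: ["localhost:9200"]
+----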
+ +The following topics describe how to configure {beatname_uc}: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +//* <> +* <> +* <> +* <> +//* <<{beatname_lc}-reference-yml>> + +-- + +include::./config-options.asciidoc[] + +include::./general-options.asciidoc[] + +:allplatforms: +include::../../libbeat/docs/queueconfig.asciidoc[] +:allplatforms!: + +include::../../libbeat/docs/outputconfig.asciidoc[] + +include::../../libbeat/docs/shared-ssl-config.asciidoc[] + +include::./filtering.asciidoc[] + +include::../../libbeat/docs/shared-config-ingest.asciidoc[] + +include::../../libbeat/docs/shared-path-config.asciidoc[] + +include::../../libbeat/docs/shared-kibana-config.asciidoc[] + +include::../../libbeat/docs/setup-config.asciidoc[] + +include::../../libbeat/docs/loggingconfig.asciidoc[] + +:standalone: +include::../../libbeat/docs/shared-env-vars.asciidoc[] +:standalone!: + +//OPEN ISSUE: DO WE NEED AUTODISCOVER? +//include::../../libbeat/docs/shared-autodiscover.asciidoc[] + +:standalone: +:allplatforms: +include::../../libbeat/docs/yaml.asciidoc[] +:standalone!: +:allplatforms!: + +include::../../libbeat/docs/regexp.asciidoc[] + +include::../../libbeat/docs/http-endpoint.asciidoc[] + +// TODO: Uncomment the following include statement when the reference yaml file +// is available in the repo. Also uncomment the link in the jump list at the top +// of this file. +//include::../../libbeat/docs/reference-yml.asciidoc[] diff --git a/journalbeat/docs/faq.asciidoc b/journalbeat/docs/faq.asciidoc new file mode 100644 index 00000000000..6d5b4ed296e --- /dev/null +++ b/journalbeat/docs/faq.asciidoc @@ -0,0 +1,24 @@ +[[faq]] +== Frequently asked questions + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +This section contains frequently asked questions about {beatname_uc}. Also check +out the https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} +discussion forum]. + +[float] +[id="{beatname_lc}-sometext"] +=== Question 1? + +ADD DESCRIPTION HERE + +[float] +[id="{beatname_lc}-sometext2"] +=== Question 2? + +ADD DESCRIPTION HERE + +include::../../libbeat/docs/faq-limit-bandwidth.asciidoc[] + +include::../../libbeat/docs/shared-faq.asciidoc[] diff --git a/journalbeat/docs/fields.asciidoc b/journalbeat/docs/fields.asciidoc new file mode 100644 index 00000000000..e40fa96b920 --- /dev/null +++ b/journalbeat/docs/fields.asciidoc @@ -0,0 +1,2536 @@ + +//// +This file is generated! See _meta/fields.yml and scripts/generate_field_docs.py +//// + +[[exported-fields]] += Exported fields + +[partintro] + +-- +This document describes the fields that are exported by Journalbeat. They are +grouped in the following categories: + +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +-- +[[exported-fields-beat]] +== Beat fields + +Contains common beat fields available in all event types. + + + +*`beat.name`*:: ++ +-- +The name of the Beat sending the log messages. If the Beat name is set in the configuration file, then that value is used. If it is not set, the hostname is used. To set the Beat name, use the `name` option in the configuration file. + + +-- + +*`beat.hostname`*:: ++ +-- +The hostname as returned by the operating system on which the Beat is running. + + +-- + +*`beat.timezone`*:: ++ +-- +The timezone as returned by the operating system on which the Beat is running. + + +-- + +*`beat.version`*:: ++ +-- +The version of the beat that generated this event. 
+
+
+--
+
+*`@timestamp`*::
++
+--
+type: date
+
+example: August 26th 2016, 12:35:53.332
+
+format: date
+
+required: True
+
+The timestamp when the event log record was generated.
+
+
+--
+
+*`tags`*::
++
+--
+Arbitrary tags that can be set per Beat and per transaction type.
+
+
+--
+
+*`fields`*::
++
+--
+type: object
+
+Contains user configurable fields.
+
+
+--
+
+[float]
+== error fields
+
+Error fields containing additional info in case of errors.
+
+
+
+*`error.type`*::
++
+--
+type: keyword
+
+Error type.
+
+
+--
+
+[[exported-fields-cloud]]
+== Cloud provider metadata fields
+
+Metadata from cloud providers added by the add_cloud_metadata processor.
+
+
+
+*`meta.cloud.provider`*::
++
+--
+example: ec2
+
+Name of the cloud provider. Possible values are ec2, gce, or digitalocean.
+
+
+--
+
+*`meta.cloud.instance_id`*::
++
+--
+Instance ID of the host machine.
+
+
+--
+
+*`meta.cloud.instance_name`*::
++
+--
+Instance name of the host machine.
+
+
+--
+
+*`meta.cloud.machine_type`*::
++
+--
+example: t2.medium
+
+Machine type of the host machine.
+
+
+--
+
+*`meta.cloud.availability_zone`*::
++
+--
+example: us-east-1c
+
+Availability zone in which this host is running.
+
+
+--
+
+*`meta.cloud.project_id`*::
++
+--
+example: project-x
+
+Name of the project in Google Cloud.
+
+
+--
+
+*`meta.cloud.region`*::
++
+--
+Region in which this host is running.
+
+
+--
+
+[[exported-fields-common]]
+== Common Journalbeat fields
+
+Contains common fields available in all event types.
+
+
+
+*`read_timestamp`*::
++
+--
+The time when Journalbeat read the journal entry.
+
+
+--
+
+[float]
+== coredump fields
+
+Fields used by the systemd-coredump kernel helper.
+
+
+
+*`coredump.unit`*::
++
+--
+type: keyword
+
+Annotations of messages containing coredumps from system units.
+
+
+--
+
+*`coredump.user_unit`*::
++
+--
+type: keyword
+
+Annotations of messages containing coredumps from user units.
+
+
+--
+
+[float]
+== journald fields
+
+Fields to log on behalf of a different program.
+
+
+
+[float]
+== audit fields
+
+Audit fields of the event.
+
+
+
+*`journald.audit.loginuid`*::
++
+--
+type: long
+
+example: 1000
+
+required: False
+
+The login UID of the source process.
+
+
+--
+
+*`journald.audit.session`*::
++
+--
+type: long
+
+example: 3
+
+required: False
+
+The audit session of the source process.
+
+
+--
+
+*`journald.cmd`*::
++
+--
+type: keyword
+
+example: /lib/systemd/systemd --user
+
+required: False
+
+The command line of the process.
+
+
+--
+
+*`journald.name`*::
++
+--
+type: keyword
+
+example: /lib/systemd/systemd
+
+required: False
+
+Name of the executable.
+
+
+--
+
+*`journald.executable`*::
++
+--
+type: keyword
+
+example: /lib/systemd/systemd
+
+required: False
+
+Path to the executable.
+
+
+--
+
+*`journald.pid`*::
++
+--
+type: long
+
+example: 1
+
+required: False
+
+The ID of the process which logged the message.
+
+
+--
+
+*`journald.gid`*::
++
+--
+type: long
+
+example: 1
+
+required: False
+
+The ID of the group which runs the process.
+
+
+--
+
+*`journald.uid`*::
++
+--
+type: long
+
+example: 1
+
+required: False
+
+The ID of the user which runs the process.
+
+
+--
+
+*`journald.capabilites`*::
++
+--
+required: False
+
+The effective capabilities of the process.
+
+
+--
+
+[float]
+== systemd fields
+
+Fields of systemd.
+
+
+
+*`systemd.invocation_id`*::
++
+--
+type: keyword
+
+example: 8450f1672de646c88cd133aadd4f2d70
+
+required: False
+
+The invocation ID for the runtime cycle of the unit the message was generated in.
+ + +-- + +*`systemd.cgroup`*:: ++ +-- +type: keyword + +example: /user.slice/user-1234.slice/session-2.scope + +required: False + +The control group path in the systemd hierarchy. + + +-- + +*`systemd.owner_uid`*:: ++ +-- +type: long + +required: False + +The owner UID of the systemd user unit or systemd session. + + +-- + +*`systemd.session`*:: ++ +-- +type: keyword + +required: False + +The ID of the systemd session. + + +-- + +*`systemd.slice`*:: ++ +-- +type: keyword + +example: user-1234.slice + +required: False + +The systemd slice unit. + + +-- + +*`systemd.user_slice`*:: ++ +-- +type: keyword + +required: False + +The systemd user slice unit. + + +-- + +*`systemd.unit`*:: ++ +-- +type: keyword + +example: nginx.service + +required: False + +The name of the systemd unit. + + +-- + +*`systemd.user_unit`*:: ++ +-- +type: keyword + +example: user-1234.slice + +required: False + +The name of the systemd user unit. + + +-- + +*`systemd.transport`*:: ++ +-- +type: keyword + +example: syslog + +required: True + +How the log message was received by journald. + + +-- + +[float] +== host fields + +Fields of the host. + + + +*`host.boot_id`*:: ++ +-- +type: text + +example: dd8c974asdf01dbe2ef26d7fasdf264c9 + +required: False + +The boot ID for the boot the log was generated in. + + +-- + +[float] +== syslog fields + +Fields of the code generating the event. + + + +*`syslog.priority`*:: ++ +-- +type: long + +example: 1 + +required: False + +The priority of the message. A syslog compatibility field. + + +-- + +*`syslog.facility`*:: ++ +-- +type: long + +example: 1 + +required: False + +The facility of the message. A syslog compatibility field. + + +-- + +*`syslog.identifier`*:: ++ +-- +type: text + +example: su + +required: False + +The identifier of the message. A syslog compatibility field. + + +-- + +*`message`*:: ++ +-- +type: text + +required: True + +The logged message. + + +-- + +*`custom`*:: ++ +-- +type: nested + +required: False + +Arbitrary fields coming from processes. + + +-- + +[[exported-fields-docker-processor]] +== Docker fields + +Docker stats collected from Docker. + + + + +*`docker.container.id`*:: ++ +-- +type: keyword + +Unique container id. + + +-- + +*`docker.container.image`*:: ++ +-- +type: keyword + +Name of the image the container was built on. + + +-- + +*`docker.container.name`*:: ++ +-- +type: keyword + +Container name. + + +-- + +*`docker.container.labels`*:: ++ +-- +type: object + +Image labels. + + +-- + +[[exported-fields-ecs]] +== ECS fields + +ECS fields. + + + +[float] +== agent fields + +The agent fields contain the data about the agent/client/shipper that created the event. + + + +*`agent.version`*:: ++ +-- +type: keyword + +example: 6.0.0-rc2 + +Version of the agent. + + +-- + +*`agent.name`*:: ++ +-- +type: keyword + +example: filebeat + +Name of the agent. + + +-- + +*`agent.id`*:: ++ +-- +type: keyword + +example: 8a4f500d + +Unique identifier of this agent (if one exists). +Example: For Beats this would be beat.id. + + +-- + +*`agent.ephemeral_id`*:: ++ +-- +type: keyword + +example: 8a4f500f + +Ephemeral identifier of this agent (if one exists). +This id normally changes across restarts, but `agent.id` does not. + + +-- + +[float] +== base fields + +The base set contains all fields which are on the top level. These fields are common across all types of events. + + + +*`base.@timestamp`*:: ++ +-- +type: date + +example: 2016-05-23T08:05:34.853Z + +required: True + +Date/time when the event originated. 
+For log events this is the date/time when the event was generated, and not when it was read. +Required field for all events. + + +-- + +*`base.tags`*:: ++ +-- +type: keyword + +example: ["production", "env2"] + +List of keywords used to tag each event. + + +-- + +*`base.labels`*:: ++ +-- +type: object + +example: {'key2': 'value2', 'key1': 'value1'} + +Key/value pairs. +Can be used to add meta information to events. Should not contain nested objects. All values are stored as keyword. +Example: `docker` and `k8s` labels. + + +-- + +*`base.message`*:: ++ +-- +type: text + +example: Hello World + +For log events the message field contains the log message. +In other use cases the message field can be used to concatenate different values which are then freely searchable. If multiple messages exist, they can be combined into one message. + + +-- + +[float] +== cloud fields + +Fields related to the cloud or infrastructure the events are coming from. + + + +*`cloud.provider`*:: ++ +-- +type: keyword + +example: ec2 + +Name of the cloud provider. Example values are ec2, gce, or digitalocean. + + +-- + +*`cloud.availability_zone`*:: ++ +-- +type: keyword + +example: us-east-1c + +Availability zone in which this host is running. + + +-- + +*`cloud.region`*:: ++ +-- +type: keyword + +example: us-east-1 + +Region in which this host is running. + + +-- + +*`cloud.instance.id`*:: ++ +-- +type: keyword + +example: i-1234567890abcdef0 + +Instance ID of the host machine. + + +-- + +*`cloud.instance.name`*:: ++ +-- +type: keyword + +Instance name of the host machine. + + +-- + +*`cloud.machine.type`*:: ++ +-- +type: keyword + +example: t2.medium + +Machine type of the host machine. + + +-- + +*`cloud.account.id`*:: ++ +-- +type: keyword + +example: 666777888999 + +The cloud account or organization id used to identify different entities in a multi-tenant environment. +Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. + + +-- + +[float] +== container fields + +Container fields are used for meta information about the specific container that is the source of information. These fields help correlate data based containers from any runtime. + + + +*`container.runtime`*:: ++ +-- +type: keyword + +example: docker + +Runtime managing this container. + + +-- + +*`container.id`*:: ++ +-- +type: keyword + +Unique container id. + + +-- + +*`container.image.name`*:: ++ +-- +type: keyword + +Name of the image the container was built on. + + +-- + +*`container.image.tag`*:: ++ +-- +type: keyword + +Container image tag. + + +-- + +*`container.name`*:: ++ +-- +type: keyword + +Container name. + + +-- + +*`container.labels`*:: ++ +-- +type: object + +Image labels. + + +-- + +[float] +== destination fields + +Destination fields describe details about the destination of a packet/event. + + + +*`destination.ip`*:: ++ +-- +type: ip + +IP address of the destination. +Can be one or multiple IPv4 or IPv6 addresses. + + +-- + +*`destination.hostname`*:: ++ +-- +type: keyword + +Hostname of the destination. + + +-- + +*`destination.port`*:: ++ +-- +type: long + +Port of the destination. + + +-- + +*`destination.mac`*:: ++ +-- +type: keyword + +MAC address of the destination. + + +-- + +*`destination.domain`*:: ++ +-- +type: keyword + +Destination domain. + + +-- + +*`destination.subdomain`*:: ++ +-- +type: keyword + +Destination subdomain. + + +-- + +[float] +== device fields + +Device fields are used to provide additional information about the device that is the source of the information. 
This could be a firewall, network device, etc.
+
+
+
+*`device.mac`*::
++
+--
+type: keyword
+
+MAC address of the device.
+
+
+--
+
+*`device.ip`*::
++
+--
+type: ip
+
+IP address of the device.
+
+
+--
+
+*`device.hostname`*::
++
+--
+type: keyword
+
+Hostname of the device.
+
+
+--
+
+*`device.vendor`*::
++
+--
+type: text
+
+Device vendor information.
+
+
+--
+
+*`device.version`*::
++
+--
+type: keyword
+
+Device version.
+
+
+--
+
+*`device.serial_number`*::
++
+--
+type: keyword
+
+Device serial number.
+
+
+--
+
+*`device.timezone.offset.sec`*::
++
+--
+type: long
+
+example: -5400
+
+Timezone offset of the host in seconds.
+Number of seconds relative to UTC. If the offset is -01:30 the value will be -5400.
+
+
+--
+
+*`device.type`*::
++
+--
+type: keyword
+
+example: firewall
+
+The type of the device the data is coming from.
+There is no predefined list of device types. Some examples are `endpoint`, `firewall`, `ids`, `ips`, `proxy`.
+
+
+--
+
+[float]
+== error fields
+
+These fields can represent errors of any kind. Use them for errors that happen while fetching events or in cases where the event itself contains an error.
+
+
+
+*`error.id`*::
++
+--
+type: keyword
+
+Unique identifier for the error.
+
+
+--
+
+*`error.message`*::
++
+--
+type: text
+
+Error message.
+
+
+--
+
+*`error.code`*::
++
+--
+type: keyword
+
+Error code describing the error.
+
+
+--
+
+[float]
+== event fields
+
+The event fields are used for context information about the data itself.
+
+
+
+*`event.id`*::
++
+--
+type: keyword
+
+example: 8a4f500d
+
+Unique ID to describe the event.
+
+
+--
+
+*`event.category`*::
++
+--
+type: keyword
+
+example: metrics
+
+Event category.
+This can be a user defined category.
+
+
+--
+
+*`event.type`*::
++
+--
+type: keyword
+
+example: nginx-stats-metrics
+
+A type given to this kind of event which can be used for grouping.
+This is normally defined by the user.
+
+
+--
+
+*`event.action`*::
++
+--
+type: keyword
+
+example: reject
+
+The action captured by the event. The type of action will vary from system to system but is likely to include actions by security services, such as blocking or quarantining; as well as more generic actions such as login events, file i/o or proxy forwarding events.
+The value is normally defined by the user.
+
+
+--
+
+*`event.module`*::
++
+--
+type: keyword
+
+example: mysql
+
+Name of the module this data is coming from.
+This information is coming from the modules used in Beats or Logstash.
+
+
+--
+
+*`event.dataset`*::
++
+--
+type: keyword
+
+example: stats
+
+Name of the dataset.
+The concept of a `dataset` (fileset / metricset) is used in Beats as a subset of modules. It contains the information which is currently stored in metricset.name and metricset.module or fileset.name.
+
+
+--
+
+*`event.severity`*::
++
+--
+type: long
+
+example: 7
+
+Severity describes the severity of the event. What the different severity values mean can be very different between use cases. It's up to the implementer to make sure severities are consistent across events.
+
+
+--
+
+*`event.original`*::
++
+--
+type: keyword
+
+example: Sep 19 08:26:10 host CEF:0|Security| threatmanager|1.0|100| worm successfully stopped|10|src=10.0.0.1 dst=2.1.2.2spt=1232
+
+Raw text message of entire event. Used to demonstrate log integrity.
+This field is not indexed and doc_values are disabled. It cannot be searched, but it can be retrieved from `_source`.
+
+
+Field is not indexed.
+ +-- + +*`event.hash`*:: ++ +-- +type: keyword + +example: 123456789012345678901234567890ABCD + +Hash (perhaps logstash fingerprint) of raw field to be able to demonstrate log integrity. + + +-- + +*`event.version`*:: ++ +-- +type: keyword + +example: 0.1.0 + +required: True + +The version field contains the version an event for ECS adheres to. +This field should be provided as part of each event to make it possible to detect to which ECS version an event belongs. +event.version is a required field and must exist in all events. It describes which ECS version the event adheres to. +The current version is 0.1.0. + + +-- + +*`event.duration`*:: ++ +-- +type: long + +Duration of the event in nanoseconds. + + +-- + +*`event.created`*:: ++ +-- +type: date + +event.created contains the date when the event was created. +This timestamp is distinct from @timestamp in that @timestamp contains the processed timestamp. For logs these two timestamps can be different as the timestamp in the log line and when the event is read for example by Filebeat are not identical. `@timestamp` must contain the timestamp extracted from the log line, event.created when the log line is read. The same could apply to package capturing where @timestamp contains the timestamp extracted from the network package and event.created when the event was created. +In case the two timestamps are identical, @timestamp should be used. + + +-- + +*`event.risk_score`*:: ++ +-- +type: float + +Risk score or priority of the event (e.g. security solutions). Use your system's original value here. + + +-- + +*`event.risk_score_norm`*:: ++ +-- +type: float + +Normalized risk score or priority of the event, on a scale of 0 to 100. +This is mainly useful if you use more than one system that assigns risk scores, and you want to see a normalized value across all systems. + + +-- + +[float] +== file fields + +File fields provide details about each file. + + + +*`file.path`*:: ++ +-- +type: text + +Path to the file. + +*`file.path.raw`*:: ++ +-- +type: keyword + +Path to the file. This is a non-analyzed field that is useful for aggregations. + + +-- + +-- + +*`file.target_path`*:: ++ +-- +type: text + +Target path for symlinks. + +*`file.target_path.raw`*:: ++ +-- +type: keyword + +Path to the file. This is a non-analyzed field that is useful for aggregations. + + +-- + +-- + +*`file.extension`*:: ++ +-- +type: keyword + +example: png + +File extension. +This should allow easy filtering by file extensions. + + +-- + +*`file.type`*:: ++ +-- +type: keyword + +File type (file, dir, or symlink). + +-- + +*`file.device`*:: ++ +-- +type: keyword + +Device that is the source of the file. + +-- + +*`file.inode`*:: ++ +-- +type: keyword + +Inode representing the file in the filesystem. + +-- + +*`file.uid`*:: ++ +-- +type: keyword + +The user ID (UID) or security identifier (SID) of the file owner. + + +-- + +*`file.owner`*:: ++ +-- +type: keyword + +File owner's username. + +-- + +*`file.gid`*:: ++ +-- +type: keyword + +Primary group ID (GID) of the file. + +-- + +*`file.group`*:: ++ +-- +type: keyword + +Primary group name of the file. + +-- + +*`file.mode`*:: ++ +-- +type: keyword + +example: 416 + +Mode of the file in octal representation. + +-- + +*`file.size`*:: ++ +-- +type: long + +File size in bytes (field is only added when `type` is `file`). + +-- + +*`file.mtime`*:: ++ +-- +type: date + +Last time file content was modified. + +-- + +*`file.ctime`*:: ++ +-- +type: date + +Last time file metadata changed. 
+ +-- + +[float] +== geo fields + +Geo fields can carry data about a specific location related to an event or geo information for an IP field. + + + +*`geo.continent_name`*:: ++ +-- +type: keyword + +Name of the continent. + + +-- + +*`geo.country_iso_code`*:: ++ +-- +type: keyword + +Country ISO code. + + +-- + +*`geo.location`*:: ++ +-- +type: geo_point + +Longitude and latitude. + + +-- + +*`geo.region_name`*:: ++ +-- +type: keyword + +Region name. + + +-- + +*`geo.city_name`*:: ++ +-- +type: keyword + +City name. + + +-- + +[float] +== host fields + +Host fields provide information related to a host. A host can be a physical machine, a virtual machine, or a Docker container. +Normally the host information is related to the machine on which the event was generated/collected, but they can be used differently if needed. + + + +*`host.timezone.offset.sec`*:: ++ +-- +type: long + +example: -5400 + +Timezone offset of the host in seconds. +Number of seconds relative to UTC. If the offset is -01:30 the value will be -5400. + + +-- + +*`host.name`*:: ++ +-- +type: keyword + +host.name is the hostname of the host. +It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. + + +-- + +*`host.id`*:: ++ +-- +type: keyword + +Unique host id. +As hostname is not always unique, use values that are meaningful in your environment. +Example: The current usage of `beat.name`. + + +-- + +*`host.ip`*:: ++ +-- +type: ip + +Host ip address. + + +-- + +*`host.mac`*:: ++ +-- +type: keyword + +Host mac address. + + +-- + +*`host.type`*:: ++ +-- +type: keyword + +Type of host. +For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. + + +-- + +*`host.os.platform`*:: ++ +-- +type: keyword + +example: darwin + +Operating system platform (centos, ubuntu, windows, etc.) + + +-- + +*`host.os.name`*:: ++ +-- +type: keyword + +example: Mac OS X + +Operating system name. + + +-- + +*`host.os.family`*:: ++ +-- +type: keyword + +example: debian + +OS family (redhat, debian, freebsd, windows, etc.) + + +-- + +*`host.os.version`*:: ++ +-- +type: keyword + +example: 10.12.6 + +Operating system version. + + +-- + +*`host.architecture`*:: ++ +-- +type: keyword + +example: x86_64 + +Operating system architecture. + + +-- + +[float] +== http fields + +Fields related to HTTP requests and responses. + + + +*`http.request.method`*:: ++ +-- +type: keyword + +example: GET, POST, PUT + +Http request method. + + +-- + +*`http.response.status_code`*:: ++ +-- +type: long + +example: 404 + +Http response status code. + + +-- + +*`http.response.body`*:: ++ +-- +type: text + +example: Hello world + +The full http response body. + + +-- + +*`http.version`*:: ++ +-- +type: keyword + +example: 1.1 + +Http version. + + +-- + +[float] +== log fields + +Fields which are specific to log events. + + + +*`log.level`*:: ++ +-- +type: keyword + +example: ERR + +Log level of the log event. +Some examples are `WARN`, `ERR`, `INFO`. + + +-- + +*`log.original`*:: ++ +-- +type: keyword + +example: Sep 19 08:26:10 localhost My log + + +This is the original log message and contains the full log message before splitting it up in multiple parts. +In contrast to the `message` field which can contain an extracted part of the log message, this field contains the original, full log message. 
It may already have some modifications applied like encoding or new lines removed to clean up the log message.
+This field is not indexed and doc_values are disabled so it can't be queried but the value can be retrieved from `_source`.
+
+
+Field is not indexed.
+
+--
+
+[float]
+== network fields
+
+Fields related to network data.
+
+
+
+*`network.name`*::
++
+--
+type: text
+
+example: Guest Wifi
+
+Name given by operators to sections of their network.
+
+
+*`network.name.raw`*::
++
+--
+type: keyword
+
+Name given by operators to sections of their network.
+
+
+--
+
+--
+
+*`network.protocol`*::
++
+--
+type: keyword
+
+example: http
+
+Network protocol name.
+
+
+--
+
+*`network.direction`*::
++
+--
+type: keyword
+
+example: inbound
+
+Direction of the network traffic.
+Recommended values are:
+ * inbound
+ * outbound
+ * unknown
+
+
+--
+
+*`network.forwarded_ip`*::
++
+--
+type: ip
+
+example: 192.1.1.2
+
+Host IP address when the source IP address is the proxy.
+
+
+--
+
+*`network.inbound.bytes`*::
++
+--
+type: long
+
+example: 184
+
+Network inbound bytes.
+
+
+--
+
+*`network.inbound.packets`*::
++
+--
+type: long
+
+example: 12
+
+Network inbound packets.
+
+
+--
+
+*`network.outbound.bytes`*::
++
+--
+type: long
+
+example: 184
+
+Network outbound bytes.
+
+
+--
+
+*`network.outbound.packets`*::
++
+--
+type: long
+
+example: 12
+
+Network outbound packets.
+
+
+--
+
+*`network.total.bytes`*::
++
+--
+type: long
+
+example: 368
+
+Network total bytes. The sum of inbound.bytes + outbound.bytes.
+
+
+--
+
+*`network.total.packets`*::
++
+--
+type: long
+
+example: 24
+
+Network total packets. The sum of inbound.packets + outbound.packets.
+
+
+--
+
+[float]
+== organization fields
+
+The organization fields enrich data with information about the company or entity the data is associated with. These fields help you arrange or filter data stored in an index by one or multiple organizations.
+
+
+
+*`organization.name`*::
++
+--
+type: text
+
+Organization name.
+
+
+--
+
+*`organization.id`*::
++
+--
+type: keyword
+
+Unique identifier for the organization.
+
+
+--
+
+[float]
+== os fields
+
+The OS fields contain information about the operating system. These fields are often used inside other prefixes, such as `host.os.*` or `user_agent.os.*`.
+
+
+
+*`os.platform`*::
++
+--
+type: keyword
+
+example: darwin
+
+Operating system platform (such as centos, ubuntu, windows).
+
+
+--
+
+*`os.name`*::
++
+--
+type: keyword
+
+example: Mac OS X
+
+Operating system name.
+
+
+--
+
+*`os.family`*::
++
+--
+type: keyword
+
+example: debian
+
+OS family (such as redhat, debian, freebsd, windows).
+
+
+--
+
+*`os.version`*::
++
+--
+type: keyword
+
+example: 10.12.6-rc2
+
+Operating system version as a raw string.
+
+
+--
+
+*`os.kernel`*::
++
+--
+type: keyword
+
+example: 4.4.0-112-generic
+
+Operating system kernel version as a raw string.
+
+
+--
+
+[float]
+== process fields
+
+These fields contain information about a process. These fields can help you correlate metrics information with a process id/name from a log message. The `process.pid` often stays in the metric itself and is copied to the global field for correlation.
+
+
+
+*`process.args`*::
++
+--
+type: keyword
+
+example: ['-l', 'user', '10.0.0.16']
+
+Process arguments.
+May be filtered to protect sensitive information.
+
+
+--
+
+*`process.name`*::
++
+--
+type: keyword
+
+example: ssh
+
+Process name.
+Sometimes called program name or similar.
+
+
+--
+
+*`process.pid`*::
++
+--
+type: long
+
+Process id.
+
+
+--
+
+*`process.ppid`*::
++
+--
+type: long
+
+Process parent id.
+
+
+--
+
+*`process.title`*::
++
+--
+type: keyword
+
+Process title.
+The proctitle, often the same as process name.
+
+
+--
+
+[float]
+== service fields
+
+The service fields describe the service for or from which the data was collected. These fields help you find and correlate logs for a specific service and version.
+
+
+
+*`service.id`*::
++
+--
+type: keyword
+
+example: d37e5ebfe0ae6c4972dbe9f0174a1637bb8247f6
+
+Unique identifier of the running service.
+This id should uniquely identify this service. This makes it possible to correlate logs and metrics for one specific service.
+Example: If you are experiencing issues with one redis instance, you can filter on that id to see metrics and logs for that single instance.
+
+
+--
+
+*`service.name`*::
++
+--
+type: keyword
+
+example: elasticsearch
+
+Name of the service data is collected from.
+The name can be used to group and correlate logs and metrics from one service.
+Example: If logs or metrics are collected from Redis, `service.name` would be `redis`.
+
+
+--
+
+*`service.type`*::
++
+--
+type: keyword
+
+Service type.
+
+
+--
+
+*`service.state`*::
++
+--
+type: keyword
+
+Current state of the service.
+
+
+--
+
+*`service.version`*::
++
+--
+type: keyword
+
+example: 3.2.4
+
+Version of the service the data was collected from.
+This allows you to look at a data set only for a specific version of a service.
+
+
+--
+
+*`service.ephemeral_id`*::
++
+--
+type: keyword
+
+example: 8a4f500f
+
+Ephemeral identifier of this service (if one exists).
+This id normally changes across restarts, but `service.id` does not.
+
+
+--
+
+[float]
+== url fields
+
+URL fields provide a complete URL, with scheme, host, and path. The URL object can be reused in other prefixes, such as `host.url.*` for example. Keep the structure consistent whenever you use URL fields.
+
+
+
+*`url.href`*::
++
+--
+type: text
+
+example: https://elastic.co:443/search?q=elasticsearch#top
+
+Full url. The field is stored as keyword.
+`url.href` is a https://www.elastic.co/guide/en/elasticsearch/reference/6.2/multi-fields.html#_multi_fields_with_multiple_analyzers[multi field]. The data is stored as keyword in `url.href` and as text in `url.href.analyzed`. These fields enable you to run a query against part of the URL without having to split it up at ingest time.
+`href` is an analyzed field so the parsed information can be accessed through `href.analyzed` in queries.
+
+
+*`url.href.raw`*::
++
+--
+type: keyword
+
+The full URL. This is a non-analyzed field that is useful for aggregations.
+
+
+--
+
+--
+
+*`url.scheme`*::
++
+--
+type: keyword
+
+example: https
+
+Scheme of the request, such as "https".
+Note: The `:` is not part of the scheme.
+
+
+--
+
+*`url.hostname`*::
++
+--
+type: keyword
+
+example: elastic.co
+
+Hostname of the request, such as "elastic.co".
+In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `hostname` field.
+
+
+--
+
+*`url.port`*::
++
+--
+type: integer
+
+example: 443
+
+Port of the request, such as 443.
+
+
+--
+
+*`url.path`*::
++
+--
+type: text
+
+Path of the request, such as "/search".
+
+
+*`url.path.raw`*::
++
+--
+type: keyword
+
+URL path. A non-analyzed field that is useful for aggregations.
+
+
+--
+
+--
+
+*`url.query`*::
++
+--
+type: text
+
+The query field describes the query string of the request, such as "q=elasticsearch".
+The `?` is excluded from the query string.
If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. + + +*`url.query.raw`*:: ++ +-- +type: keyword + +URL query part. A non-analyzed field that is useful for aggregations. + + +-- + +-- + +*`url.fragment`*:: ++ +-- +type: keyword + +Portion of the url after the `#`, such as "top". +The `#` is not part of the fragment. + + +-- + +*`url.username`*:: ++ +-- +type: keyword + +Username of the request. + + +-- + +*`url.password`*:: ++ +-- +type: keyword + +Password of the request. + + +-- + +[float] +== user fields + +The user fields describe information about the user that is relevant to the event. Fields can have one entry or multiple entries. If a user has more than one id, provide an array that includes all of them. + + + +*`user.id`*:: ++ +-- +type: keyword + +One or multiple unique identifiers of the user. + + +-- + +*`user.name`*:: ++ +-- +type: keyword + +Name of the user. +The field is a keyword, and will not be tokenized. + + +-- + +*`user.email`*:: ++ +-- +type: keyword + +User email address. + + +-- + +*`user.hash`*:: ++ +-- +type: keyword + +Unique user hash to correlate information for a user in anonymized form. +Useful if `user.id` or `user.name` contain confidential information and cannot be used. + + +-- + +[float] +== user_agent fields + +The user_agent fields normally come from a browser request. They often show up in web service logs coming from the parsed user agent string. + + + +*`user_agent.original`*:: ++ +-- +type: text + +Unparsed version of the user_agent. + + +-- + +*`user_agent.device`*:: ++ +-- +type: keyword + +Name of the physical device. + + +-- + +*`user_agent.version`*:: ++ +-- +type: keyword + +Version of the physical device. + + +-- + +*`user_agent.major`*:: ++ +-- +type: long + +Major version of the user agent. + + +-- + +*`user_agent.minor`*:: ++ +-- +type: long + +Minor version of the user agent. + + +-- + +*`user_agent.patch`*:: ++ +-- +type: keyword + +Patch version of the user agent. + + +-- + +*`user_agent.name`*:: ++ +-- +type: keyword + +example: Chrome + +Name of the user agent. + + +-- + +*`user_agent.os.name`*:: ++ +-- +type: keyword + +Name of the operating system. + + +-- + +*`user_agent.os.version`*:: ++ +-- +type: keyword + +Version of the operating system. + + +-- + +*`user_agent.os.major`*:: ++ +-- +type: long + +Major version of the operating system. + + +-- + +*`user_agent.os.minor`*:: ++ +-- +type: long + +Minor version of the operating system. + + +-- + +[[exported-fields-host-processor]] +== Host fields + +Info collected for the host machine. + + + + +*`host.os.kernel`*:: ++ +-- +type: keyword + +The operating system's kernel version. 
+ + +-- + +[[exported-fields-kubernetes-processor]] +== Kubernetes fields + +Kubernetes metadata added by the kubernetes processor + + + + +*`kubernetes.pod.name`*:: ++ +-- +type: keyword + +Kubernetes pod name + + +-- + +*`kubernetes.pod.uid`*:: ++ +-- +type: keyword + +Kubernetes Pod UID + + +-- + +*`kubernetes.namespace`*:: ++ +-- +type: keyword + +Kubernetes namespace + + +-- + +*`kubernetes.node.name`*:: ++ +-- +type: keyword + +Kubernetes node name + + +-- + +*`kubernetes.labels`*:: ++ +-- +type: object + +Kubernetes labels map + + +-- + +*`kubernetes.annotations`*:: ++ +-- +type: object + +Kubernetes annotations map + + +-- + +*`kubernetes.container.name`*:: ++ +-- +type: keyword + +Kubernetes container name + + +-- + +*`kubernetes.container.image`*:: ++ +-- +type: keyword + +Kubernetes container image + + +-- + diff --git a/journalbeat/docs/filtering.asciidoc b/journalbeat/docs/filtering.asciidoc new file mode 100644 index 00000000000..e75b4e73bd7 --- /dev/null +++ b/journalbeat/docs/filtering.asciidoc @@ -0,0 +1,43 @@ +[[filtering-and-enhancing-data]] +== Filter and enhance the exported data + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +Your use case might require only a subset of the data exported by {beatname_uc}, +or you might need to enhance the exported data (for example, by adding +metadata). {beatname_uc} provides a couple of options for filtering and +enhancing exported data. + +You can configure each input to include or exclude specific lines or files. This +allows you to specify different filtering criteria for each input. To do this, +you use the `include_lines`, `exclude_lines`, and `exclude_files` options under +the +{beatname_lc}.inputs+ section of the config file (see +<>). The disadvantage of this approach is +that you need to implement a configuration option for each filtering criteria +that you need. + +Another approach (the one described here) is to define processors to configure +global processing across all data exported by {beatname_uc}. + + +[float] +[[using-processors]] +=== Processors + +include::../../libbeat/docs/processors.asciidoc[] + +[float] +[[specific-example]] +==== XYZ example + +ADD EXAMPLES SPECIFIC TO THE BEAT, OR DELETE THIS SECTION + +// You must set the processor-scope attribute to resolve the attribute reference +// defined in processors-using.asciidoc. The attribute is used to indicate where +// processors are valid. If processors are valid in more than two locations +// (root and :processor-scope:), you need to add a conditionally coded section +// to processors-using.asciidoc. + +:processor-scope: input +include::../../libbeat/docs/processors-using.asciidoc[] +:processor-scope!: diff --git a/journalbeat/docs/general-options.asciidoc b/journalbeat/docs/general-options.asciidoc new file mode 100644 index 00000000000..97367b71aac --- /dev/null +++ b/journalbeat/docs/general-options.asciidoc @@ -0,0 +1,10 @@ +[[configuration-general-options]] +== Specify general settings + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +You can specify settings in the +{beatname_lc}.yml+ config file to control the +general behavior of {beatname_uc}. 
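+
+For example, a sketch that uses the shared libbeat general options `name`,
+`tags`, and `fields` (the values shown are placeholders) might look like:
+
+["source","sh",subs="attributes"]
+----
+name: "my-shipper"
+tags: ["journald", "web-tier"]
+fields:
+  env: staging
+----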
+ +include::../../libbeat/docs/generalconfig.asciidoc[] + diff --git a/journalbeat/docs/getting-started.asciidoc b/journalbeat/docs/getting-started.asciidoc new file mode 100644 index 00000000000..6fffeff4b5e --- /dev/null +++ b/journalbeat/docs/getting-started.asciidoc @@ -0,0 +1,270 @@ +[id="{beatname_lc}-getting-started"] +== Getting Started With {beatname_uc} + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +include::../../libbeat/docs/shared-getting-started-intro.asciidoc[] + +* <<{beatname_lc}-installation>> +* <<{beatname_lc}-configuration>> +* <<{beatname_lc}-template>> +* <> +* <<{beatname_lc}-starting>> +* <> +* <> + +[id="{beatname_lc}-installation"] +=== Step 1: Install {beatname_uc} + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +:no-docker: +include::../../libbeat/docs/shared-download-and-install.asciidoc[] + +[[deb]] +*deb:* + +ifeval::["{release-state}"=="unreleased"] + +Version {version} of {beatname_uc} has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + +["source","sh",subs="attributes"] +------------------------------------------------ +curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-amd64.deb +sudo dpkg -i {beatname_lc}-{version}-amd64.deb +------------------------------------------------ + +endif::[] + +[[rpm]] +*rpm:* + +ifeval::["{release-state}"=="unreleased"] + +Version {version} of {beatname_uc} has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + +["source","sh",subs="attributes"] +------------------------------------------------ +curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-x86_64.rpm +sudo rpm -vi {beatname_lc}-{version}-x86_64.rpm +------------------------------------------------ + +endif::[] + +[[mac]] +*mac:* + +ifeval::["{release-state}"=="unreleased"] + +Version {version} of {beatname_uc} has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + +["source","sh",subs="attributes"] +------------------------------------------------ +curl -L -O https://artifacts.elastic.co/downloads/beats/{beatname_lc}/{beatname_lc}-{version}-darwin-x86_64.tar.gz +tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz +------------------------------------------------ + +endif::[] + +[[win]] +*win:* + +ifeval::["{release-state}"=="unreleased"] + +Version {version} of {beatname_uc} has not yet been released. + +endif::[] + +ifeval::["{release-state}"!="unreleased"] + +. Download the {beatname_uc} Windows zip file from the +https://www.elastic.co/downloads/beats/{beatname_lc}[downloads page]. + +. Extract the contents of the zip file into `C:\Program Files`. + +. Rename the +{beatname_lc}--windows+ directory to +{beatname_uc}+. + +. Open a PowerShell prompt as an Administrator (right-click the PowerShell icon and select *Run As Administrator*). + +. 
From the PowerShell prompt, run the following commands to install {beatname_uc} as a +Windows service: ++ +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +PS > cd 'C:{backslash}Program Files{backslash}{beatname_uc}' +PS C:{backslash}Program Files{backslash}{beatname_uc}> .{backslash}install-service-{beatname_lc}.ps1 +---------------------------------------------------------------------- + +NOTE: If script execution is disabled on your system, you need to set the execution policy for the current session to allow the script to run. For example: +PowerShell.exe -ExecutionPolicy UnRestricted -File .\install-service-{beatname_lc}.ps1+. + +endif::[] + +[id="{beatname_lc}-configuration"] +=== Step 2: Configure {beatname_uc} + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +:no-docker: +include::../../libbeat/docs/shared-configuring.asciidoc[] + +Here is a sample of the +{beatname_lc}+ section of the +{beatname_lc}.yml+ file. +{beatname_uc} uses predefined default values for most configuration options. + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +journalbeat.inputs: +- paths: ["/path/to/journal/directory"] + seek: cursor +---------------------------------------------------------------------- + +To configure {beatname_uc}: + +. Specify a list of paths to your systemd journal files. Each path can be a +directory path (to collect events from all journals in a directory), or a file +path. For example: ++ +["source","sh",subs="attributes"] +---- +{beatname_lc}.inputs: +- paths: + - "/dev/log" + - "/var/log/messages/my-journal-file" +---- ++ +If no paths are specified, {beatname_uc} reads from the default journal. + +. Set the `seek` option to control the position where {beatname_uc} starts +reading the journal. The available options are `head`, `tail`, and `cursor`. +Typically, you'll set `seek: cursor` so {beatname_uc} can continue reading +where it left off after a reload or restart. + +. Optional: Set the `include_matches` option to filter entries in journald +before collecting any log events. This reduces the number of fields that the +Beat needs to process. For example, to fetch only Redis events from a Docker +container tagged as `redis`, use: ++ +["source","sh",subs="attributes"] +---- +{beatname_lc}.inputs: +- paths: [] + include_matches: + - "CONTAINER_TAG=redis" + - "_COMM=redis" +---- ++ +See <> for more about this setting. + +include::../../libbeat/docs/step-configure-output.asciidoc[] + +include::../../libbeat/docs/step-configure-kibana-endpoint.asciidoc[] + +include::../../libbeat/docs/step-configure-credentials.asciidoc[] + +include::../../libbeat/docs/step-test-config.asciidoc[] + +include::../../libbeat/docs/step-look-at-config.asciidoc[] + + +[id="{beatname_lc}-template"] +=== Step 3: Load the index template in Elasticsearch + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +:allplatforms: +include::../../libbeat/docs/shared-template-load.asciidoc[] + +[[load-kibana-dashboards]] +=== Step 4: Set up the Kibana dashboards + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +:allplatforms: +include::../../libbeat/docs/dashboards.asciidoc[] + +[id="{beatname_lc}-starting"] +=== Step 5: Start {beatname_uc} + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. 
+ +Start {beatname_uc} by issuing the appropriate command for your platform. If you +are accessing a secured Elasticsearch cluster, make sure you've configured +credentials as described in <<{beatname_lc}-configuration>>. + +NOTE: If you use an init.d script to start {beatname_uc} on deb or rpm, you can't +specify command line flags (see <>). To specify flags, +start {beatname_uc} in the foreground. + +*deb:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +sudo service {beatname_lc} start +---------------------------------------------------------------------- + +*rpm:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +sudo service {beatname_lc} start +---------------------------------------------------------------------- + +*mac:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +sudo chown root {beatname_lc}.yml <1> +sudo ./{beatname_lc} -e -c {beatname_lc}.yml -d "publish" +---------------------------------------------------------------------- +<1> You'll be running {beatname_uc} as root, so you need to change ownership +of the configuration file, or run {beatname_uc} with `--strict.perms=false` +specified. See +{libbeat}/config-file-permissions.html[Config File Ownership and Permissions] +in the _Beats Platform Reference_. + +*win:* + +["source","sh",subs="attributes"] +---------------------------------------------------------------------- +PS C:{backslash}Program Files{backslash}{beatname_uc}> Start-Service {beatname_lc} +---------------------------------------------------------------------- + + +By default, Windows log files are stored in +C:\ProgramData\{beatname_lc}\Logs+. + +{beatname_uc} is now ready to send log files to your defined output. + +[[view-kibana-dashboards]] +=== Step 6: View the sample Kibana dashboards + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +To make it easier for you to explore {beatname_uc} data in Kibana, we've created +example {beatname_uc} dashboards. You loaded the dashboards earlier when you +ran the `setup` command. + +include::../../libbeat/docs/opendashboards.asciidoc[] + +You can use these dashboards as examples and +{kibana-ref}/dashboard.html[customize] them to meet your needs. + +To populate the example dashboards with data, you need to either +<> or use Logstash to +parse the data into the fields expected by the dashboards. + +Here is an example of the {beatname_uc} ADD DASHBOARD NAME dashboard: + +// Add an example of the dashboard +//[role="screenshot"] +//image:./images/add-image-name.png[] diff --git a/journalbeat/docs/how-it-works.asciidoc b/journalbeat/docs/how-it-works.asciidoc new file mode 100644 index 00000000000..375c55507e3 --- /dev/null +++ b/journalbeat/docs/how-it-works.asciidoc @@ -0,0 +1,6 @@ +[id="how-{beatname_lc}-works"] +== How {beatname_uc} works + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +DESCRIBE HOW THE BEAT WORKS. 
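+
+At its core, {beatname_uc} reads journald through the vendored
+https://github.com/coreos/go-systemd[go-systemd] `sdjournal` bindings. The
+following Go sketch illustrates the general pattern (seek to a starting
+position, read entries, wait for new data, and remember the cursor). It is a
+simplified sketch rather than the beat's actual reader, and compiling it
+requires the systemd development headers (for example, `libsystemd-journal-dev`):
+
+["source","go"]
+----
+package main
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/coreos/go-systemd/sdjournal"
+)
+
+func main() {
+    // Open the local journal; NewJournalFromDir can open a directory instead.
+    j, err := sdjournal.NewJournal()
+    if err != nil {
+        panic(err)
+    }
+    defer j.Close()
+
+    // seek: tail -- start at the end and pick up only new entries.
+    if err := j.SeekTail(); err != nil {
+        panic(err)
+    }
+
+    for {
+        n, err := j.Next()
+        if err != nil {
+            panic(err)
+        }
+        if n == 0 {
+            // No new entry yet: wait for the journal to change. This is
+            // where the backoff and max_backoff settings would apply.
+            j.Wait(1 * time.Second)
+            continue
+        }
+
+        entry, err := j.GetEntry()
+        if err != nil {
+            continue
+        }
+        // entry.Fields holds the raw journald fields (e.g. MESSAGE, _COMM).
+        fmt.Println(entry.Fields["MESSAGE"])
+
+        // Persisting entry.Cursor (the registry_file idea) is what lets
+        // seek: cursor resume from this position after a restart.
+        _ = entry.Cursor
+    }
+}
+----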
diff --git a/journalbeat/docs/images/kibana-created-indexes.png b/journalbeat/docs/images/kibana-created-indexes.png
new file mode 100644
index 00000000000..ad9c65ae1c7
Binary files /dev/null and b/journalbeat/docs/images/kibana-created-indexes.png differ
diff --git a/journalbeat/docs/images/kibana-navigation-vis.png b/journalbeat/docs/images/kibana-navigation-vis.png
new file mode 100644
index 00000000000..8f7ce06c5cd
Binary files /dev/null and b/journalbeat/docs/images/kibana-navigation-vis.png differ
diff --git a/journalbeat/docs/index.asciidoc b/journalbeat/docs/index.asciidoc
new file mode 100644
index 00000000000..be27ac9a406
--- /dev/null
+++ b/journalbeat/docs/index.asciidoc
@@ -0,0 +1,43 @@
+= Journalbeat Reference
+
+include::../../libbeat/docs/version.asciidoc[]
+
+include::{asciidoc-dir}/../../shared/attributes.asciidoc[]
+
+:version: {stack-version}
+:beatname_lc: journalbeat
+:beatname_uc: Journalbeat
+:beatname_pkg: {beatname_lc}
+:github_repo_name: beats
+:discuss_forum: beats/{beatname_lc}
+:beat_default_index_prefix: {beatname_lc}
+:has_ml_jobs: no
+:libbeat-docs: Beats Platform Reference
+
+include::../../libbeat/docs/shared-beats-attributes.asciidoc[]
+
+:release-state: released
+
+include::./overview.asciidoc[]
+
+include::./getting-started.asciidoc[]
+
+include::../../libbeat/docs/repositories.asciidoc[]
+
+include::./setting-up-running.asciidoc[]
+
+//TODO: Decide whether this requires a separate topic
+//include::./how-it-works.asciidoc[]
+
+include::./configuring-howto.asciidoc[]
+
+include::./fields.asciidoc[]
+
+include::../../libbeat/docs/monitoring/monitoring-beats.asciidoc[]
+
+include::../../libbeat/docs/shared-securing-beat.asciidoc[]
+
+include::./troubleshooting.asciidoc[]
+
+include::./faq.asciidoc[]
+
diff --git a/journalbeat/docs/overview.asciidoc b/journalbeat/docs/overview.asciidoc
new file mode 100644
index 00000000000..8ed7b455388
--- /dev/null
+++ b/journalbeat/docs/overview.asciidoc
@@ -0,0 +1,19 @@
+[id="{beatname_lc}-overview"]
+== {beatname_uc} overview
+
+++++
+Overview
+++++
+
+IMPORTANT: This documentation is placeholder content. It has not yet been reviewed.
+
+{beatname_uc} is a lightweight shipper for forwarding and centralizing log data
+from https://www.freedesktop.org/software/systemd/man/systemd-journald.service.html[systemd journals].
+Installed as an agent on your servers, {beatname_uc} monitors the journal
+locations that you specify, collects log events, and forwards them either to
+https://www.elastic.co/products/elasticsearch[Elasticsearch] or
+https://www.elastic.co/products/logstash[Logstash].
+
+{beatname_uc} is an Elastic https://www.elastic.co/products/beats[Beat]. It's
+based on the `libbeat` framework. For more information, see the
+{libbeat}/index.html[{libbeat-docs}].
diff --git a/journalbeat/docs/page_header.html b/journalbeat/docs/page_header.html
new file mode 100644
index 00000000000..0f01f3bfad5
--- /dev/null
+++ b/journalbeat/docs/page_header.html
@@ -0,0 +1,4 @@
+This functionality is experimental and may be changed or removed completely in a
+future release. Elastic will take a best effort approach to fix any issues, but
+experimental features are not subject to the support SLA of official GA
+features.
\ No newline at end of file diff --git a/journalbeat/docs/running-on-docker.asciidoc b/journalbeat/docs/running-on-docker.asciidoc new file mode 100644 index 00000000000..6bbc976ad85 --- /dev/null +++ b/journalbeat/docs/running-on-docker.asciidoc @@ -0,0 +1 @@ +include::../../libbeat/docs/shared-docker.asciidoc[] diff --git a/journalbeat/docs/running-on-kubernetes.asciidoc b/journalbeat/docs/running-on-kubernetes.asciidoc new file mode 100644 index 00000000000..16b53e8e3af --- /dev/null +++ b/journalbeat/docs/running-on-kubernetes.asciidoc @@ -0,0 +1,6 @@ +[id="running-{beatname_lc}-on-kubernetes"] +=== Running {beatname_uc} on Kubernetes + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +ADD CONTENT HERE. diff --git a/journalbeat/docs/setting-up-running.asciidoc b/journalbeat/docs/setting-up-running.asciidoc new file mode 100644 index 00000000000..8f7ccba3bb8 --- /dev/null +++ b/journalbeat/docs/setting-up-running.asciidoc @@ -0,0 +1,44 @@ +///// +// NOTE: +// Each beat has its own setup overview to allow for the addition of content +// that is unique to each beat. +///// + +[[setting-up-and-running]] +== Setting up and running {beatname_uc} + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +Before reading this section, see the +<<{beatname_lc}-getting-started,getting started documentation>> for basic +installation instructions to get you started. + +This section includes additional information on how to set up and run +{beatname_uc}, including: + +* <> + +* <> + +* <> + +//* <> + +//* <> + +* <> + + +//MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. + +include::../../libbeat/docs/shared-directory-layout.asciidoc[] + +include::../../libbeat/docs/keystore.asciidoc[] + +include::../../libbeat/docs/command-reference.asciidoc[] + +//include::./running-on-docker.asciidoc[] + +//include::./running-on-kubernetes.asciidoc[] + +include::../../libbeat/docs/shared-shutdown.asciidoc[] diff --git a/journalbeat/docs/troubleshooting.asciidoc b/journalbeat/docs/troubleshooting.asciidoc new file mode 100644 index 00000000000..fa25622e7f1 --- /dev/null +++ b/journalbeat/docs/troubleshooting.asciidoc @@ -0,0 +1,33 @@ +[[troubleshooting]] += Troubleshooting + +[partintro] +-- + +IMPORTANT: This documentation is placeholder content. It has not yet been reviewed. + +If you have issues installing or running {beatname_uc}, read the +following tips: + +* <> +* <> +* <> + +//sets block macro for getting-help.asciidoc included in next section + +-- + +[[getting-help]] +== Get help + +include::../../libbeat/docs/getting-help.asciidoc[] + +//sets block macro for debugging.asciidoc included in next section + +[id="enable-{beatname_lc}-debugging"] +== Debug + +include::../../libbeat/docs/debugging.asciidoc[] + + + diff --git a/journalbeat/include/fields.go b/journalbeat/include/fields.go new file mode 100644 index 00000000000..8ba15d1769e --- /dev/null +++ b/journalbeat/include/fields.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT. + +package include + +import ( + "github.com/elastic/beats/libbeat/asset" +) + +func init() { + if err := asset.SetFields("journalbeat", "fields.yml", Asset); err != nil { + panic(err) + } +} + +// Asset returns asset data +func Asset() string { + return "eJzkff1z2ziy4O/5K1CZqptkT6Zt2XESv9p7Ly8fM75NJqk4ubm63S0LIiEJYwrgAKAVzb3736/Q+CBAghJlKZ5X9bJVm4lIdjeARnejv3CEbsn6EuV8ueTsEUKKqpJcosev4Qf0P3ktGC6nBKvHjxAqiMwFrRTl7BL9j0cIIfSaM4UpkxYEmlFSFhLhO0xLPC0JogzhskTkjjCF1LoiMnuE7GuXAOIIMbwkl0gQXNwouiRS4WUFj5IozZ8vC4L0y2i1IBGhAAepBUG/mR8RYUqsswhXzgUpao9F03WJ5oLXW/G+MwOsJSnQdI3kWiqyLI4cQHRLBCMlWpCyIiKzH4bDDcmoGVUBbEPGLVmvuCiC33uJQegVY1xh/UQiPkNLIiWeE70csC6Uzf1YJZoJvrQUA2qZdSmSRNz8GWRpxCFRjiC7iMW9V6oS/I4WZrUcsG3rwqe/kbw7BSHaLRPg0SuOSj5HnKEpWeBypqcDo4LOZkToLVEJPhd4mQVft6kKKcN1Ea1NP3FbCYR10uDcpuUzs02z1ospekKaSj6n7KamRQKBoa3kbJ54KMjvNRWkuEQzXEqSeIN8w8tKS6TTk5OTxPOtAzSCAihEX6/e6DFq0WAWWE9+TqRsD7gZmiRSUpCM32tgZ/cfFfCCI3HIyLz0W7aXqm+Lbx+MH8jj45JOj608dH+joyO9sR/vxpd6cFqfYFagkjLixrZxUPr/H2hUOw7nF7z0QyDfSF4rrRnTo2ieH2wsW6j7hNVCSylN3SYKd5sUr086UqF34+w1ioF725E1fyCyftqNrOqByPq0G1l2hQ+ndq6tdNhT8fAV09bK91A8A0VwyHaamPtrkvTOPiixDa1OOlui+qluWYIPTjILRKcjWtO0geKEAfufg2xn4HaNbnNm+M9obhbkjuYPpor01Nnzk0EMVPTIpHpqZvZPIM7j3kCfGcCNXC9Lym5lkkpFvrXZdC8SXxUF1c9wiSxeIEaiilOm9KHL6nk3u7yAA/pxQe42jkK/eFNhtXiQYXxpUagRm91EpXswiOo+i/B7ENzlW4Ql0vb5gq8kqitNcjD3ShCCpqTkK6QtqK5QyHlBDiISrBzS8NCcMCKwYQVt67W17iZZMKM9RunO09kYkll2LEV+nHNBjpeY4TkRWX6P00IkcHktcgLkotWCCGPWaqlIpZsAUqQ5Z1azXHXV9N7j/I1Pb0o+v5EKq1reWH/IngN1xKLVguaLZmh+vBZNeqj6YHUYY7M5pI/bp9kBI4IDHquXUyIiVh08KG88GxP2UTiWXRxFO6rOPt9R20Nzj4271SuzaZuCr+Ngx65NDpghi9t2vNjNufm4kTSV9x3APVgz6WDpG0HaudJnjvQTvbNDZcNABjtSepwoh6Z+INnbHCYbnSW707yBkmHOkaGDTh/zk4w9YIZPh/NAs//sylvpWvL53IrWXrE6//OIBaFpSRU1k5t5t/7zCIWz3VA6c1zhKS2pInJf9tRUkNmM5IrekRBwcqOn3Tm7K0o+cyC26ULK7ngOYaCbxOLcT668OH92Mju9eD4uyMX5Rf7iRV6cnp1hXBTns3Hx/GQH0diQp9dyxgXMmagZBBbzdV56MaQP7uE+QSscGJOIssQ6t3X+XtJUs1gmS5oT+M+j0/HZuf23VVBH40zmvCI76QamBC/tRoNzlj2lOHWzoERgkS/W3fGlvG877rot5AGGyHpo+1IQF/3OrH574qA6YqBrzVNTxh6VfbiixQk7rLwnU3/X45YCV9r+5A6kBNZ0IznDotJD5o3NKfuWSSLudpu17e7I+8TRv+9K7+iLVAIzWXGxG+FK1Gm65VqWfD6Q3J/5qn3MAzkrSE7oXSqE76hecKn20Wcaq4axTaFNOVcpVdZyDQxZ0aJ4kb98fo5lMTs5LaZkTGbji+L5TP8wvjjPX+6wxpqsUIXBv91MpjVVYAyUfL7v3G11LvVNaCUoF1StH9Zyc1gd+c74Ra/sfOhTU4UVBXNqbajv7pUZzuH5wxLvsO5JPC0IU3RGiTgEN8t6F7vLo77HGBz99pNHPYQnZVMPXdZToY9C0THIW3K1VHwZYWJEKlJ0cIUT1IPslZhSJbBYOwdPzpd6y0AWlLXXicwePbK5eVOCVZOZ9+/mXwMy8SAXbsd0vGjUGkAWOAQ2TF6oYzSFSBJWODkQSHOZoavgLfiMNqcgSZSzP3POZnReC2Odz2hJRvp3/RArdIfLWn8JaXgAkyr9T8ZVCGzk5brFZN//wgFVRMdIP4OfJvqfEw+Hw4j76cq6k+Ywbp84TxtoOVULZrQcBJQrJ01tsMl7ej3hwdyJmjHK5glq9GHmD84GUOPe/J7U3BERGOMbiLEvOrYCdobFD13CVDo1E2F6/G8+j/RxtG0LrMgmATHjYolV9J6Xca/qeS0VGl+oBRqfnF6M0On48uzZ5bOz7OxsPGx2gSSTsupVJGwQQXIuilhXtwal8FwOFS36XTNbOdaiAPi9IsIsFGYF/ANsPRwHPPQ8tRAb6RDNY5Qgaf5xk7ISewj1sgoMUb+ntIAyyFoUECG42NVGeas/aoSsTzzFTZSUshnXOzvHEuQX4JHbbBZNwQDreIMSNKSZqbZinuSykfJvX1+nhfzb19d+hmICo/nCc8K8PWxAvgp+gsm7RAN4FgC15tBEMbHCCE95bcQovHecl1T/JRe0AvZa4EYc54L4ME5sGXKuGFckWjqz5+QlurJi1y6QZl8JZm7J53LU4M6chNciGQQNescFevXpw8jqBrVoFs0MywopJ9pxVR3bI2EWDF7z
iBYKWhYVnICSQfkCszlBdOZBwoRQqTWfnhLB6/kC/V6TuhGZEpX0lqC/4dktHqHPpKByhLhwSj94sVFidb7Q0vg9n0uF5QKZMaFrIu6abKPuluhj3Vj63pN7/1csmc38N0xo/nixeZGdZCdHIh93iBkYbBgYKNhChuOLhB28Hw1fGf297tq0VFr+eUJnSOtU8o1KJZ92KPTcfgkcazgcvl/xuiy08AZ+pkUy5PACn8+enZwUnXGRakGWROBymOt1k7RykPYZ5Bf9Mi0Q05upLNd2C0mEc8GltjekwkLJEZrWCk3MatJi4vfcptHPuiJwir0h7kzn5hcrAE+3C0ANBpRn7lSWNp+tQDT2DxZEG0MQIuIVKskdKUGASOLtb0GcWW6Hq6GA+e3ju3L33Zwwc1DK1EGbfDXVAstWeLQ51mk75+jk2dH47MvJi8uTZ5dn59mLZ2f/Z+Bh7w1W5Lip0mlMHi7onLImDyP66p0R73ZaDJsZAQ6DSgKMDKeRtnAikFpmwxdUWY8STmH+bCfJzDjoGX9akq33E2YZ2rC/mjn9+z8eV4IXNdhd/3g8Qv94TNjd+B+P/zlwVt9TqTTbWCS2GElxTQoiOF+ECrZDb4mnpOxS3Cl58QT/31uyPr00Z67TkcY6tv8a/79hBP+NrI/Nka3CVLQnUv95baxUNxBc6JO4VqiB8lXcLQS6XoBoBE1sjZLoQG7+mCHJDL0qS0Ow2YlScb3GWLoZ3CSTJwXPb4mYgNE8uX0hJ3YGe6Y3dkugPp+K3XWnSQ75mZQlR79yUQ6NmHe2TBOoMqzsxVcyjSaCdcUQVwsi4ECsDa8kvHjBcs5yrAiLZQ4Ksmfs/DciE07zM0FIuUaSYJEvIKyuD/PLulS0amWb+Roy0DFg+q0dGTlfTikD76bioIi6w/POnJLXRawZXgc/DbONrfdTkNIYtSZDAEBrE42ymcBSiTpXtc0/MyvTWKBGIzjHz0BjeIY+ECVoPjXHbW/Bar3C0NvXY3AoAKvOiMoXRBq7FHxLNECvXxsFNMNBKOKRyMCnEi1xvqDMrE9DRHjgl0AGEmTJFXHvI14rSQsS4EpTh5G1vUOQoXkOHxuaWyxtwDaggFst+tDqtwjiidtd69qCQpHauiQwc/e2aM24HLrMMUIoykg+HqF5TvQ5orXx5lThkucEsx5JZZ2C4GC9CRxE0YBqeUSwVEen+X7jehUgQ+Bjoo3/iErDt83C9JAsyHzY6aVL/zAyPwOCe9FGmVSY5SQbZG57AilE9Z5dPH/x8gRP84LMToaRemXxBbFnINRt1C1U7n/48gSE3t8BJLinA90ofqbUOFuSgtbLYeR9cBJgXe1CHc5zXsPRYxfaLi4unj9//uLFi5cvXw4j70sjDw1GrTe4mGNG/zD2Di28erXnrnWjTyNY+qGiRIJ332jPI62MmUKE3VHB2TJ1Ng5Vy6tfrz0htBihnzifl8RoRvTx80/oqgBfhbUM4MwbgWqOhimda0S1l5lO77Z+HqZ7/Vfh6QpmStvrHbOxcVLJiuR0RvMOOci4Su0ZwyZ48lkIpnWgW5CyQjkXxgAwukcfFRvm8DhsfTtma5dZtLvKsR/ut18/27wmyLM3wRkqGzqT52tj/HalyGF8Jh43oqkA5VIbcId1EgFMF8ixuPV5cFrTUqFUuo6hQuH5fkQ0TGtJwPN7Z98OQhNXJu16+Nvg0t9CwRUMr3NEakpzpNIH/0aNW1nwpvNgmDQIvnOb07w5JaggCtNSBiIgQA/p/R5MhfNboo4jz/Tw/Um7aX50YLb/1Sd92hVE+lyKgMb+k7K2oLS0sycldPXp7lz/cPXp7sIBJInk01Zo8p5s9rMLW/aRHKJMJhO1MiQ2pWBzoQbhWeI9bdQPr15vXYsQYcGXmA6xRhOH/U1Os4BHDYpEEmE9fQDsHktyIweHNb+Hg9+Gbl84krU1ueLu3NMO13XVeqtGNa3KQfjH6hyUn/WwYzSjgqxwWY4QI2rFxa2FO0JE5btLhO/DjNFAv5PwgfjXg8mNNLY7wgq+NS9pIxcDWxk40cIncB0gJObxib4UZEFxeWPK3A6CykC0hXOJNE6bypHx2UwSlUnS5cfhMviLSwwx0KLjFGVIkpyzIuXX/cXX9dl3jMuM3hG9xb9+ee0TgixkKtHRyenl2UnkudF/jAN5RctSb9ijZ+cnJ0mTFZ5052PvmD2kcARnSVfH63xlVHYdem0AgpgsJVQJUpAZuCxL68138CAnC13zJXFjArkYgZoQVkBN92SEJk5y6f+mhYS/KvirEvzbepKcJfdRV7BHuRY2HSH4aXDuQHNYyjFDglSCQGzc5FiA9cXW6JayIkNfTerVEk5w9oUoe2CBq4qAU6YkxnmoJ9p6u2GHW091U/Br4kJUSVLOgugdM/Cj9dnB0Dt4sNil7Hap2jmmsDXhJO3z31hwfo+0FsgFtja4zwVujc4z210nUeXt3X0SVcxqpxwCeunJN9VnPMDWBSa5h9l/GG64eqOFoT+1JAvz++P9CQOvqTRTZM5FNz95t1WFqXWw+kL7NhKDTUqXE27xV62hLCGMINPcuL/AfmXE9ZzeEWYiNFSCvPEhd+vkDWNZmmNg6buOXj9UEOE2i8EN1GZJQpVWaqxQcXIkFVbyaOO4cbv/wD1VlYGDclypWjQEGsaKlJl9EzTrHeQlCx53VbEZn4q7/5rWoKlLekvKNTgoWV7WhcMqoTMmyWvIsbdhFzmKYdrMpmnJ81sIxQj0e40FZgrS9P5FP1yRstR/L7mw5QU09zg0hAgklrbe3OiFkWkBQY+5TbL6ttbLu8KiaJRHWk83CcY7L7Qg3pXSleO8qAfVKg/0Zhl4ti/KQBtE828gCeMvAqg2q4Aym5HEhU9CS2/mtfy9TA9bkybJnh1Fw3FbgD1rl3OWk0qZ9g0T++4EPdHcoE3MYyd4iHrqcsKbcWIZeIUMo06tyWsnJkNXKo6VhhNqRIqe1loIwlS5jqGZ3APKGiJM6iJmRfCTXVnIXwWqs3anIT/xIFPSEy/JHRlSXLMxGeH5wBSEa4vMKzJ7BHc/u3p+I4B+XWCrgJMRDf+VjXUuCWYgp++ICKIgaErUihDWpCroxfkR2u0oHkE03t+qJEvCFBFaaC3xLUGyFp5ISlyqFpNUKo3ApmttzACyyUzdBl6DfDE/oK+afVTNsAJpCkntNtRrahqghxAz8YZclWu0Jkoz6n+ggpvUJi5uI5CUIehSYERo9OhKov/2w+n4/F+ck8Sb5t4t+h+QJsXFrSYE9hIYUo2BHQE0Dhua38p0T4RrUqHTl+jkxeX44vL0xJwaX799d3li6Li2isL8K1o0vWyCYGVbA5k3TjP74enJSfKbFRdLrR1yIuWs1sJbKl5VpHCfmb+lyP96epLp/522IBRS/XWcnWbjbCwr9dfT8dl44C5A6DNegWHuE2a0tcEUFZ73v1oPV0GWnEklsLLtiZgicz0TCcFmRbfJfLBcQVlBvhGTUFHw/CbICyio1MtfGFmFmX592m6NYrJuSGFSLqmvFRBaDJE7bQ1pnTC
5MW606CAJuBO1vp6M8FlnxyywXNxvtzRs1YTNU//16t9fvxm8ZD9juUBPKiIWuAIbwuRazyibE1EJytRTvYoCr+wCKA627lQrX97mnYGrurv/qTeFc4sp6CppEplg7hFm7gTFBRQZ4ELvc4kU77MiDDS5cC5U32cbS1Rh47NvkhG9vKUKVVxK2u5wC/tBkRzeNEpU09EhcEq08krZbWZ3uQ+ohFykKJ8TdGwtlUkhi2rhQHE8itfRqbEuNY1/Ycs8EWcGoICuk+w0S/uu4EmPEWVrzrbp8k2eQ1e2FqpiPQsMM5724fmTpKne6CBvJRlvQG5Wx1WBtFPNkvm89uU+BmzqqbT5S6WiLFdGZP1b8MyWDAY/OeQd+8BVXxYN7MylVgKpkiC14s1Tf+xNWzHYjK9FjBEL0I5JM2Rr4NQkJxtPmOGLCOZ0jd7ZUgaQ9KAIwJ2U4zJDk2acE8PrYdVO+3YFx35K4Fw5eR9SOGqtmyfWD4GGydQh40tt1ZoAC64qc0yscH6rVaI5lepTh/HXJRan4/9tXknQ62I2DoGe2DTlXabcwmtXtjwM5i9efD3/fu5H4SgasQgVp+lNJai8vZE5F90j4azkeKBr7zOVtwigmGNuXNluRviEZPMsOJHzsoYz9NN42b5Kgta8di1VfpTetLUHYr1YWwdzo8/M+4zoFzhz0z9IAVC3DG5k0k5ljk23nhPNaKcuOJD03iwxZeVaL82sLhGd6UHDEQL8DGqBGcTXndtDiw8sJZ23REZDnISKAwCzwkbZSUIQtu4DGIqZwaD8w9Z6JbyiQafNlgfU+kjfNS/0Jij7WkofSY3TIUA3NwXM2/2ePp4f9oFNOKIjisIGaiEyZFIXbnoznvBqP39BB7Fffb0q7AgzXK7/8KaBixobnoggQRXIfC7I3FxoEjFWUwUi5kTd7DQ3X+Ab03ZpBo2MTK/gbXPUN0ubmh9vtH8PN1cDZ4t8U4TJdtlxl/IN3UdK0kDpbHUg38pgXJZ8hQiWaz02RUDtTNfGOehBBJPurbHKGlbtpQ490wPoBlrB2frE9DEoqBihZr2fJqeondWwHc8bF5Dsy39o9l8LF2Vh6GcAqivoy+wdBy7KY/ytzP+3kXBJlGHbsB3X/otruHf1Bj35evXmKcyl021BaO3JNTxsBh/26G/RA092XlX46kdTxt446Fqg57sN9ZOgSyzWtiebHuNPrWGksUTN5nbGE2Zl9OJYbmeT5ihzcX6SRvxB8064KpQhnitctjxRSRIk/aNNQnQA6q6R/kKjmK4VkXoLWg8K1yYALgpnG040tEnYxEL/mWgKJ+ktuoxychMHooiY91gqc2MZDBrCktb4XPJCc2yRxJLvg2VJFIbIgKm2LRLGxpzw2Lj4yf8wLPz6E+FhpD/HQqzD8iHcJF6Xrt1iUDjlTvYeHheapsipDkqFoatPcTOj4ZFaPduUEabaTeH3LdFxcBPtH3nNlFjfUMlv9g+tvzbQ0NX1RwiwJ1J77dx28MwJv4FkkWGY3nM2pwqCeaxAJVbwjy4+U4tzgPm0NTfphOWcqvUBcLzWqqEloVO948wO+Ln5ZdgW0B+0re2Qf0N2N33m0CvjB3dhcw+qWqylPk66MpURwuiOClWHP+ntgN5Abn47gd8D+sVFLoNMrSju1ype9AV7YV+feGdGRdbHOS9LkivnPw7rMSEk4H0i5VqfsRghBbnH1v0vl8m2yevdJLd15mn/TQKM6fqoRJ2zOh0So+9MiME7mlbaAJ24bye2kxRUh35l9Js799pSzrpsRUh/r3EJ2tAmP9vuXMDyQIzVJq1YvPE5ERYXZurx5rTwTlwz9Yrrb3rnvDO1g/J8dkuztqk/hu9SbqdXMmpcxrhCuFzhtbTFV6ZfmQ35GBeFIBAnpWzePpZRZvw6g6rBLiO/de1iWBPfD26SqJK5fw4yyE5auUTk/qLB/Zj7Z1v6twXPAfJEbVpNz2Z5x4WtqnOFvbbDhRWdUfGyBgU9gya++HESu+yuZuhuOXKlXNbnGNU3jUJXclDDF2iDCGLDQv1sY/6kN80P6KNvFWfueUui8gcvmVUlVrOUz3Cnef/YblDnwKInOWGKyxGqpzVT9QitKCv4SprU/qcpOVtgsbLFFSmKB8raJlj5Aefo4zX63wNDkp2xdA6XETkzvKTlkCy/oPssmVLMhpJzjQwK9ESQYoHVCJnvR9DAYSqL5JymSB0e7QwivSfZ6Ti7uO/cRUn5HZqwyBdUEWjUsBNV315c3Fyc35eoEG3KJlWqatmkX7582skm7bao0CAgJEqkkmDdCyIrzoJCsR1KUg2cbEnUgu+ZB/uzUpUDiAzAZHj0p7dfRujTx2v9/1+/JEgyo8nsLUvJU9dwU9FSZWAiA7N19gpoOz857ydoyovu9hyevf3FGkrAFg1JGmqSFtM/ZsVF2W0LdpByF5iaTrFL2M44O+0yddBn2vD0e//DZh5umsZ4T4K9rOm+nbOgSdd+c/Cezw0YZx17ehJav1POgSa/vvr8y2SEJm8/f9Z/Xf3y7mO6VOPt589dSbpXyll/blbJc1yCUfphjVqN23dK+emdvhZjN629fKgx7P6uhVSUKwDbIHgjAjclMw5MUlIFwpYqe/Ger5OtsEgm/V6Z84sA95k5EE8siokNezTJ4u6kg1kQi9aQI5ABW1hI1k5L5OG4wY86A8xSR60FviMIl4LgYo2k5i3jQsztlfu4qkoKtUW3BBGW88JmWDMSB4xKyoiElj13tpFTSTCD9MmtfaLulZCGJLeZZj92MtJ+r4mAY52tzTCHtUFJaZGcsckAsaz5JfrxvirU14ZihXeXOkmzcbgaAMejKWeYrm1DZqiU4kgSmxRvmI4KR2laj4Ki/ZXOaPC0L9bYH23cFG/cEnHcZzCdaa0EVzzne8rzX1wKiYWGejOuA+MsiNdRQQ5QuvHGgXHiw3GcEng2o3liH34mOV8uCStckgHsuPat2X9BlE15zdrL9BfEa5V+ULNbxlcsNQUhrM5U2CILUtzs6xYI6pN95pGNaQaPqL9w61vaHjp9Oc5Os9NsHNP7g21kJjsjsMPLIGa0hwnpeMrCMzGoNIkvuuajo8L0pjgkHRZimpJuo17HIQebDwdwxwnxdBxuRjwlO06J4gqXB5sPgGYnwzgy66VpQBTMO/rvrYVI0np28aKH2O84aSma7bOQ6i4FnuzxeVePh92wYmX+sftkeKlo1GTLBm0IE9q4g6jlisI1aKlqUbiUhK21JQU9t5pDXVgGjqXkOTVZh1QtUq2j1rxGWAhoIm6KfBQRBkBTIYSZsahAQcb9XjzecDD3OAftaZGE67DJR/X9yqbD8Wcx98gWz7S8kjvzzcfrdiP8NJO0b8rIQihxT2g+U6Z4Sa83tMk0vtlKkBn9RuTIl0lCPCXjMvvLRPPBBO4XM02y4cfdl/67e12B9B7X69N0t7HG67qVSR/G2xqS8YBeVrfq27ytT/dpZ9JxsB6JfGiZU5+TFconoVBGKuFLqEP6zD35O5F3np1nJ0enp+MjWwJ8XyLtHf0baY1kSO
tKcSNIPkU/3qcfRq/4wN1bYzudNLz+aNoP2rrRuA5VazEPD9HiONpGtudueMI3Um7iKKhoMbECSiq8li6xzyBzjTX0UT9Imcp5RZuUgnnJp7gMmqk7ktvu+OFSC4tB3dY3JQbbGcFiXi97SsA/4DWaEquWfTsqqE6ShEkKYf9kV6GAb//++Kh8PDJXN+q/Xa3hxeN/3lfEDRhWQgsj64CE8gSU47IkhbvN3ib+CSTpkpY4XdMug2o9vzW23/s6gNr2pRYb8B0GYYUhqt0JuTfZJmrfCn2HCkD1VIXpTQbPR3aLKVcxg6Xfsz35SnGfbCuUrqMfhxs1rid2u3WiCp9BZ1ojMprUIGMr43Dv23ygPoN3RllhPbpOckFhFWT3ede+h+fQ6y9SMbw/s2uPdc64NuLu2qDUYptrT2wyusndKNdNR1/wCAfXDkF5yi2RmwolW/MXtA4wa8WCQEk/aT7d42pmzyMEkW8VEZSwHLznUkLLfq1JNExBCugeYdo+j/RHEUCtnexJhtuqO1q4WhhHICQVulWHdyRlc8gCtp2p25Q25uHZc/KMTGfkBJOL/Pzl83ExJS9nJ6fPz/Hpxdnz6fTF+Pz57CL4dnNez0CpuzGCQkosFc1NLfVAwyTMIHVc3vTvsLtoQxsxI7RbVzCYPO7E9orYQ+/huNU7GsgiAMs0WDYLCY0SQmLdlVYTB9Dkf7lrjCLIE2CmyX5ZOLulXFkRCdB68EoV17MeBvFrm0oF0Fvrvo8Bv5Evz7JxNjQ7oXWhl2PJUMoP4UsqTbGNNNFZfouwNmmNV4Mok3EfC/vwlkfcz5Th/DzQvVZuEg5+s5Ub2B53W9WijLX/18/vN6v6r5/ft/OTMXizSqKIfjoyYl7mekpG9n4QuCcSWw9WgMT1h25ic66Hzmb3RS3K7C8TzQKPWqPN0N8IMUHH5tqUoA3LakEYuSPCV2o2A7qnTbAQZNZhn+Ger3d1Wep1MFPjo6BDrhaa6M80+ompsPs7OPUMjH8+WShVycvj49VqlVndkuX8eF7TghwTdhyBipTPsSCQb52T44tsHL9o7gSwE7ZQy/KHmzDed6MX/8Y5F29svZ+QT83wrG6K92d7pOG4NOMoIlV63JmrJ5y0LEXCoKWGXmPFtXGFMASF1wjPsbYPeoPstSiRVLQsbduaJgXAhrI1v2h7RG9MUyCTWplmVRhqFT1Kc6StsDCs3py0XQp/bnoHxMaavQhyEo9bbxUT7e6ePh84Dutzi75+fr9P3Wdf5adl1DB2qtm7Ye3L8/OzY8PB//r7XyOO/kHxbqDViKj9JP81wPBWvMk8a6TVY6DycaoKAO5mAj/J5cSlPbhuJyC9AHL/0Lty6Ls0Vu4OqZnwx/ZyXffnipkUEtPfCcNWWeI1AnFiK7SuPuk9fcwFdHO3we5ybbQGeK4ikEHmfmaucIUEZ2ku8Y7CumAczrlPumnqBlr31oe3LPmxdN0EqWbz0IInqgsY2G++M43n52fp7L/zsy4pYS347hoGirJ7l9PumMfZnyc5NJ8Y6+DVQaWFIxYk/x4TqHep0R6GoLgvnXliPL/taY4VnZvylnBKiQcQDP8KgoF8g46YQY+SECMUC5mtluxGw7iGA7vF94wOxuJqjcwzDDi1ceneGrWUUDwRxpS1HmKGyLJSDV0wBPPGJIJiILQOnb7Gi+qjjevG51qlmI58fy6HGrK1iP5efDoTeL6MW//cx2vIRZj2ow0aPINGhXpBfpgEe1/xqpf5fkhqJUdil3hXub4f8V8tlNZG6qKrsJQtsPfq7WGgJNE9ag+vdVSSO1435dsNtF2k6egvvOp4SpCS3OGANRRHYRfMd0FYB9+ZW10I1PmGd7voXyi0tgwPyYBo4Zrj+qY1tBg1RzwGSQZrS4/p0Wuaz/Dm8KMWTYz64XyqH1sX2NRtH6u/kiJutXu4iEnohWtwdLaUP9thB94cjKF+0/RVRIrfEkb/IIlbrMgS03umaW/ZcAZ0XM+GDtJkcbsr3DHfInZHd2r2zYuQy8LZegmNkPQribn+6rsxQXID+EdcpoP1JLrAac7ZzDBK+1KYVhaj73zZbsMVygeTRtGVEij8fTdZYUA6idE4hrSZbSOvU8FXGomTXfrbtQkGeXBywVc2gX1Fpt4lBZ7YdtdmezCtPeGtCPzwnd1bWzDc9PrKLDl3sWcxyFrpoG01vNl7S/tC+v5LZg5QCdNynW5FusS/JS62GR7H/KC/T00r6pnWJWX7IdTf74KwwiofInc2H33yxS44d80Qer0QfDmwcWVbTfTRMLwsdCCy/jyye9VTDmfiQYi/CyMPw/w9OLqL+UjPYXNdeHhVuC/d9v11HiUxfnDdd0BK562qb9N7yPYwwEVxAy/c+JY9Ng3A3JvihHWkvfSrGXyVtS6lTlxI3TMjmy6c/mTjz303Trdume6jzUV2m4BNDy1brzTeiiHYgdtwtBtbbMdiX7gJopO9lxP3YN9+KXEf8r7buvtv6u4hYeebuDewHFwS6le1aZxnnhx9G8569hNNS3j1cD/26CrwHgTDbvO2G91fdesuM3T/TgC3rXfg6oV2ENw803tWLrhQN2CszpvaSMzyBRcO35Hf5Y9ii8ybReENvLs1vDJdgR7krt4A3TJ1U9UDXtjbkLK/Ot52nW6D66Ev1rVMa3tXxX2rEkCu2IyHjGqz59v3oTve1L9v5cywb9bww4XMBmcAb/HttlX2j7KV4Otn6bae6gemVsfO1d/C3xKYmudNE71IYzdAUThTmzd989HW6Y2I3m2SK14cgPmDGah4YWzsJKp6XxETYPrEC/T16k0Xkf5/WeF9T4gBqgZiFxkvDnHzeIiMF6RnCoeKjmGIDDS0xFUXE3hDjBv7UOgCkGmchxTHAd48ksyb0B5AISXxGrj/PwAA//+acaX3" +} diff --git a/journalbeat/input/config.go b/journalbeat/input/config.go new file mode 100644 index 00000000000..d5c4b741236 --- /dev/null +++ b/journalbeat/input/config.go @@ -0,0 +1,74 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package input
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/processors"
+)
+
+// Config stores the options of an input.
+type Config struct {
+	// Paths stores the paths to the journal files to be read.
+	Paths []string `config:"paths"`
+	// Backoff is the current interval to wait before
+	// attempting to read again from the journal.
+	Backoff time.Duration `config:"backoff" validate:"min=0,nonzero"`
+	// BackoffFactor is the multiplier of Backoff.
+	BackoffFactor int `config:"backoff_factor" validate:"min=1"`
+	// MaxBackoff is the limit of the backoff time.
+	MaxBackoff time.Duration `config:"max_backoff" validate:"min=0,nonzero"`
+	// Seek is the method to read from journals.
+	Seek string `config:"seek"`
+	// Matches stores the key-value pairs used to match entries.
+	Matches []string `config:"include_matches"`
+
+	// Fields and tags to add to events.
+	common.EventMetadata `config:",inline"`
+	// Processors to run on events.
+	Processors processors.PluginConfig `config:"processors"`
+}
+
+var (
+	// DefaultConfig is the default configuration of an input.
+	DefaultConfig = Config{
+		Backoff:       1 * time.Second,
+		BackoffFactor: 2,
+		MaxBackoff:    60 * time.Second,
+		Seek:          "tail",
+	}
+)
+
+// Validate checks the configuration of the input.
+func (c *Config) Validate() error {
+	correctSeek := false
+	for _, s := range []string{"cursor", "head", "tail"} {
+		if c.Seek == s {
+			correctSeek = true
+		}
+	}
+
+	if !correctSeek {
+		return fmt.Errorf("incorrect value for seek: %s. possible values: cursor, head, tail", c.Seek)
+	}
+
+	return nil
+}
diff --git a/journalbeat/input/input.go b/journalbeat/input/input.go
new file mode 100644
index 00000000000..42d8a0ea394
--- /dev/null
+++ b/journalbeat/input/input.go
@@ -0,0 +1,197 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package input
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/gofrs/uuid"
+
+	"github.com/elastic/beats/journalbeat/checkpoint"
+	"github.com/elastic/beats/journalbeat/reader"
+	"github.com/elastic/beats/libbeat/beat"
+	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/logp"
+	"github.com/elastic/beats/libbeat/processors"
+)
+
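+// Illustrative sketch (comment only, not part of the build): with the
+// defaults in DefaultConfig (Backoff=1s, BackoffFactor=2, MaxBackoff=60s),
+// the wait between unsuccessful reads grows as 1s, 2s, 4s, ... and is capped
+// at 60s. tryRead is a stand-in for the actual journal read, which lives in
+// the reader package:
+//
+//	wait := cfg.Backoff
+//	for {
+//		if ok := tryRead(); ok {
+//			wait = cfg.Backoff // reset after a successful read
+//			continue
+//		}
+//		time.Sleep(wait)
+//		wait = wait * time.Duration(cfg.BackoffFactor)
+//		if wait > cfg.MaxBackoff {
+//			wait = cfg.MaxBackoff
+//		}
+//	}
+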
+// Input manages readers and forwards entries from journals.
+type Input struct {
+	readers    []*reader.Reader
+	done       chan struct{}
+	config     Config
+	pipeline   beat.Pipeline
+	states     map[string]checkpoint.JournalState
+	id         uuid.UUID
+	logger     *logp.Logger
+	eventMeta  common.EventMetadata
+	processors beat.ProcessorList
+}
+
+// New returns a new Input.
+func New(
+	c *common.Config,
+	pipeline beat.Pipeline,
+	done chan struct{},
+	states map[string]checkpoint.JournalState,
+) (*Input, error) {
+	config := DefaultConfig
+	if err := c.Unpack(&config); err != nil {
+		return nil, err
+	}
+
+	id, err := uuid.NewV4()
+	if err != nil {
+		return nil, fmt.Errorf("error while generating ID for input: %v", err)
+	}
+
+	logger := logp.NewLogger("input").With("id", id)
+
+	var readers []*reader.Reader
+	if len(config.Paths) == 0 {
+		cfg := reader.Config{
+			Path:       reader.LocalSystemJournalID, // used to identify the state in the registry
+			Backoff:    config.Backoff,
+			MaxBackoff: config.MaxBackoff,
+			Seek:       config.Seek,
+			Matches:    config.Matches,
+		}
+
+		state := states[reader.LocalSystemJournalID]
+		r, err := reader.NewLocal(cfg, done, state, logger)
+		if err != nil {
+			return nil, fmt.Errorf("error creating reader for local journal: %v", err)
+		}
+		readers = append(readers, r)
+	}
+
+	for _, p := range config.Paths {
+		cfg := reader.Config{
+			Path:       p,
+			Backoff:    config.Backoff,
+			MaxBackoff: config.MaxBackoff,
+			Seek:       config.Seek,
+			Matches:    config.Matches,
+		}
+		state := states[p]
+		r, err := reader.New(cfg, done, state, logger)
+		if err != nil {
+			return nil, fmt.Errorf("error creating reader for journal: %v", err)
+		}
+		readers = append(readers, r)
+	}
+
+	processors, err := processors.New(config.Processors)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Debugf("New input is created for paths %v", config.Paths)
+
+	return &Input{
+		readers:    readers,
+		done:       done,
+		config:     config,
+		pipeline:   pipeline,
+		states:     states,
+		id:         id,
+		logger:     logger,
+		eventMeta:  config.EventMetadata,
+		processors: processors,
+	}, nil
+}
+
+// Run connects to the output, collects entries from the readers
+// and then publishes the events.
+func (i *Input) Run() {
+	client, err := i.pipeline.ConnectWith(beat.ClientConfig{
+		PublishMode:   beat.GuaranteedSend,
+		EventMetadata: i.eventMeta,
+		Meta:          nil,
+		Processor:     i.processors,
+		ACKCount: func(n int) {
+			i.logger.Infof("journalbeat successfully published %d events", n)
+		},
+	})
+	if err != nil {
+		i.logger.Errorf("Error connecting to output: %v", err)
+		return
+	}
+	defer client.Close()
+
+	i.publishAll(client)
+}
+
+// publishAll reads events from all readers and publishes them.
+func (i *Input) publishAll(client beat.Client) {
+	out := make(chan *beat.Event)
+	defer close(out)
+
+	var wg sync.WaitGroup
+	defer wg.Wait()
+	for _, r := range i.readers {
+		wg.Add(1)
+		r := r
+		go func() {
+			defer wg.Done()
+
+			for {
+				select {
+				case <-i.done:
+					return
+				default:
+				}
+
+				event, err := r.Next()
+				if event == nil {
+					if err != nil {
+						i.logger.Errorf("Error while reading event: %v", err)
+					}
+					continue
+				}
+
+				select {
+				case <-i.done:
+				case out <- event:
+				}
+			}
+		}()
+	}
+
+	for {
+		select {
+		case <-i.done:
+			return
+		case e := <-out:
+			client.Publish(*e)
+		}
+	}
+}
+
+// Stop stops all readers of the input.
+func (i *Input) Stop() {
+	for _, r := range i.readers {
+		r.Close()
+	}
+}
+
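+// Usage sketch (illustrative; the real wiring lives in the journalbeat
+// beater, and error handling is elided):
+//
+//	inp, err := input.New(cfg, b.Publisher, done, states)
+//	if err != nil {
+//		return err
+//	}
+//	go inp.Run() // publishes entries until done is closed
+//	...
+//	inp.Stop()   // closes all journal readers
+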
+// Wait waits until all readers are done.
+func (i *Input) Wait() {
+	i.Stop()
+}
diff --git a/journalbeat/journalbeat.reference.yml b/journalbeat/journalbeat.reference.yml
new file mode 100644
index 00000000000..2fcc03bf832
--- /dev/null
+++ b/journalbeat/journalbeat.reference.yml
@@ -0,0 +1,1191 @@
+###################### Journalbeat Configuration Example #########################
+
+# This file is a full configuration example documenting all the supported
+# options in comments. For a shorter example that contains only the most
+# common options, see the journalbeat.yml file in the same directory.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/journalbeat/index.html
+
+#=========================== Journalbeat inputs =============================
+
+journalbeat.inputs:
+  # Paths that should be crawled and fetched. Possible values: files and directories.
+  # When setting a directory, all journals under it are merged.
+  # When empty, reading starts from the local journal.
+- paths: []
+
+  # The number of seconds to wait before trying to read again from journals.
+  #backoff: 1s
+  # The maximum number of seconds to wait before attempting to read again from journals.
+  #max_backoff: 60s
+
+  # Position to start reading from journal. Valid values: head, tail, cursor
+  seek: tail
+
+  # Exact matching for field values of events.
+  # Matching for nginx entries: "systemd.unit=nginx"
+  #include_matches: []
+
+  # Optional fields that you can specify to add additional information to the
+  # output. Fields can be scalar values, arrays, dictionaries, or any nested
+  # combination of these.
+  #fields:
+  #  env: staging
+
+
+#========================= Journalbeat global options ============================
+#journalbeat:
+  # Name of the registry file. If a relative path is used, it is considered relative to the
+  # data path.
+  #registry_file: registry
+
+  # The number of seconds to wait before trying to read again from journals.
+  #backoff: 1s
+  # The maximum number of seconds to wait before attempting to read again from journals.
+  #max_backoff: 60s
+
+  # Position to start reading from all journals. Possible values: head, tail, cursor
+  #seek: head
+
+  # Exact matching for field values of events.
+  # Matching for nginx entries: "systemd.unit=nginx"
+  #matches: []
+
+#================================ General ======================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output. Fields can be scalar values, arrays, dictionaries, or any nested
+# combination of these.
+#fields:
+#  env: staging
+
+# If this option is set to true, the custom fields are stored as top-level
+# fields in the output document instead of being grouped under a fields
+# sub-dictionary. Default is false.
+#fields_under_root: false
+
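+# Example (illustrative only): collect entries for a single unit from the
+# local journal, resume from the stored cursor after restarts, and tag the
+# events. The unit name below is an assumption for demonstration purposes.
+#
+#journalbeat.inputs:
+#- paths: []
+#  seek: cursor
+#  include_matches:
+#    - "systemd.unit=sshd.service"
+#  tags: ["auth"]
+
+# Internal queue configuration for buffering events to be published.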
+#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < min_flush_events. + #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed, if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch, as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +#================================ Processors =================================== + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. 
This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. +# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +#- include_fields: +# fields: ["cpu"] +#- drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +#- drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +#- rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +#- dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +#- add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +#- add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +#- add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +#- add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +#- add_host_metadata: +# netinfo.enabled: false +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +#- add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +#- decode_json_fields: +# fields: ["field1", "field2", ...] +# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false + +#============================= Elastic Cloud ================================== + +# These settings simplify using journalbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. 
+#cloud.auth: + +#================================ Outputs ====================================== + +# Configure what output to use when sending the data collected by the beat. + +#-------------------------- Elasticsearch output ------------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping html symbols in strings. + #escape_html: true + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the url with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "journalbeat" plus date + # and generates [journalbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "journalbeat-%{[beat.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP Path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure http request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # SSL configuration. By default is off. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + +#----------------------------- Logstash output --------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping html symbols in strings. + #escape_html: true + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optional load balance the events between the Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to journalbeat + # in all lowercase. + #index: 'journalbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. 
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +#------------------------------- Kafka output ---------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from where to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version journalbeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Metadata update configuration. Metadata do contain leader information + # deciding which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. + #retry.max: 3 + + # Waiting time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. 
+ # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. + #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- Redis output ---------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. 
+ #escape_html: true + + # The list of Redis servers to connect to. If load balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + #hosts: ["localhost:6379"] + + # The Redis port to use if hosts does not contain a port number. The default + # is 6379. + #port: 6379 + + # The name of the Redis list or channel the events are published to. The + # default is journalbeat. + #key: journalbeat + + # The password to authenticate with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. + #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Enable SSL support. SSL is automatically enabled, if any SSL setting is set. + #ssl.enabled: true + + # Configure SSL verification mode. If `none` is configured, all server hosts + # and certificates will be accepted. In this mode, SSL based connections are + # susceptible to man-in-the-middle attacks. Use only for testing. Default is + # `full`. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions 1.0 up to + # 1.2 are enabled. 
+ #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] + + # Optional SSL configuration options. SSL is off by default. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the Certificate Key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + +#------------------------------- File output ----------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/journalbeat" + + # Name of the generated files. The default is `journalbeat` and it generates + # files: `journalbeat`, `journalbeat.1`, `journalbeat.2`, etc. + #filename: journalbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every journalbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + + +#----------------------------- Console output --------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping html symbols in strings. + #escape_html: true + +#================================= Paths ====================================== + +# The home path for the journalbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the journalbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the journalbeat installation. This is the default base path +# for all the files in which journalbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. +#path.data: ${path.home}/data + +# The logs path for a journalbeat installation. This is the default location for +# the Beat's log files. 
If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +#================================ Keystore ========================================== +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + +#============================== Dashboards ===================================== +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards are disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +#setup.dashboards.enabled: false + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboards archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#setup.dashboards.beat: journalbeat + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + + +#============================== Template ===================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +#setup.template.enabled: true + +# Template name. By default the template name is "journalbeat-%{[beat.version]}" +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. +#setup.template.name: "journalbeat-%{[beat.version]}" + +# Template pattern. By default the template pattern is "-%{[beat.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. +# The template name and pattern has to be set in case the elasticsearch index pattern is modified. +#setup.template.pattern: "journalbeat-%{[beat.version]}-*" + +# Path to fields.yml file to generate the template +#setup.template.fields: "${path.config}/fields.yml" + +# A list of fields to be added to the template and Kibana index pattern. 
Also
+# specify setup.template.overwrite: true to overwrite the existing template.
+# This setting is experimental.
+#setup.template.append_fields:
+#- name: field_name
+#  type: field_type
+
+# Enable json template loading. If this is enabled, the fields.yml is ignored.
+#setup.template.json.enabled: false
+
+# Path to the json template file
+#setup.template.json.path: "${path.config}/template.json"
+
+# Name under which the template is stored in Elasticsearch
+#setup.template.json.name: ""
+
+# Overwrite existing template
+#setup.template.overwrite: false
+
+# Elasticsearch template settings
+setup.template.settings:
+
+  # A dictionary of settings to place into the settings.index dictionary
+  # of the Elasticsearch template. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
+  #index:
+    #number_of_shards: 1
+    #codec: best_compression
+    #number_of_routing_shards: 30
+
+  # A dictionary of settings for the _source field. For more details, please check
+  # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
+  #_source:
+    #enabled: false
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+  # Optional HTTP Path
+  #path: ""
+
+  # Use SSL settings for HTTPS. Default is true.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+
+
+#================================ Logging ======================================
+# There are four options for the log output: file, stderr, syslog, eventlog
+# The file output is the default.
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: info
+
+# Enable debug output for selected components. To enable all selectors use ["*"]
+# Other available selectors are "beat", "publish", "service"
+# Multiple selectors can be chained.
+#logging.selectors: [ ]
+
+# Send all logging output to syslog. The default is false.
+#logging.to_syslog: false
+
+# Send all logging output to Windows Event Logs. The default is false.
+#logging.to_eventlog: false
+
+# If enabled, journalbeat periodically logs its internal metrics that have changed
+# in the last period. For each metric that changed, the delta from the value at
+# the beginning of the period is logged. Also, the total values for
+# all non-zero internal metrics are logged on shutdown. The default is true.
+#logging.metrics.enabled: true
+
+# The period after which to log the internal metrics. The default is 30s.
+#logging.metrics.period: 30s
+
+# Logging to rotating files. Set logging.to_files to false to disable logging to
+# files.
+logging.to_files: true
+logging.files:
+  # Configure the path where the logs are written. The default is the logs directory
+  # under the home path (the binary location).
+  #path: /var/log/journalbeat
+
+  # The name of the files where the logs are written to.
+  #name: journalbeat
+
+  # Configure log file size limit. If limit is reached, log file will be
+  # automatically rotated
+  #rotateeverybytes: 10485760 # = 10MB
+
+  # Number of rotated log files to keep. Oldest files will be deleted first.
+  #keepfiles: 7
+
+  # The permissions mask to apply when rotating log files. The default value is 0600.
+  # Must be a valid Unix-style file permissions mask expressed in octal notation.
+  #permissions: 0600
+
+  # Enable log file rotation on time intervals in addition to size-based rotation.
+  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
+  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
+  # reported by the local system clock. All other intervals are calculated from the
+  # unix epoch. Defaults to disabled.
+  #interval: 0
+
+# Set to true to log messages in json format.
+#logging.json: false
+
+
+#============================== Xpack Monitoring =====================================
+# journalbeat can export internal metrics to a central Elasticsearch monitoring cluster.
+# This requires xpack monitoring to be enabled in Elasticsearch.
+# The reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line, and leave the rest commented out.
+#xpack.monitoring.elasticsearch:
+
+  # Array of hosts to connect to.
+  # Scheme and port can be left out and will be set to the default (http and 9200)
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
+  #hosts: ["localhost:9200"]
+
+  # Set gzip compression level.
+  #compression_level: 0
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "beats_system"
+  #password: "changeme"
+
+  # Dictionary of HTTP parameters to pass within the URL with index operations.
+  #parameters:
+    #param1: value1
+    #param2: value2
+
+  # Custom HTTP headers to add to each request
+  #headers:
+  #  X-My-Header: Contents of the header
+
+  # Proxy server URL
+  #proxy_url: http://proxy:3128
+
+  # The number of times a particular Elasticsearch index operation is attempted.
If
+  # the indexing operation doesn't succeed after this many retries, the events are
+  # dropped. The default is 3.
+  #max_retries: 3
+
+  # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
+  # The default is 50.
+  #bulk_max_size: 50
+
+  # The number of seconds to wait before trying to reconnect to Elasticsearch
+  # after a network error. After waiting backoff.init seconds, the Beat
+  # tries to reconnect. If the attempt fails, the backoff timer is increased
+  # exponentially up to backoff.max. After a successful connection, the backoff
+  # timer is reset. The default is 1s.
+  #backoff.init: 1s
+
+  # The maximum number of seconds to wait before attempting to connect to
+  # Elasticsearch after a network error. The default is 60s.
+  #backoff.max: 60s
+
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
+  #timeout: 90
+
+  # Use SSL settings for HTTPS.
+  #ssl.enabled: true
+
+  # Configure SSL verification mode. If `none` is configured, all server hosts
+  # and certificates will be accepted. In this mode, SSL based connections are
+  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
+  # `full`.
+  #ssl.verification_mode: full
+
+  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
+  # 1.2 are enabled.
+  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
+
+  # SSL configuration. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+  # Optional passphrase for decrypting the Certificate Key.
+  #ssl.key_passphrase: ''
+
+  # Configure cipher suites to be used for SSL connections
+  #ssl.cipher_suites: []
+
+  # Configure curve types for ECDHE based cipher suites
+  #ssl.curve_types: []
+
+  # Configure what types of renegotiation are supported. Valid options are
+  # never, once, and freely. Default is never.
+  #ssl.renegotiation: never
+
+  #metrics.period: 10s
+  #state.period: 1m
+
+#================================ HTTP Endpoint ======================================
+# Each beat can expose internal metrics through an HTTP endpoint. For security
+# reasons the endpoint is disabled by default. This feature is currently experimental.
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
+# append ?pretty to the URL.
+
+# Defines if the HTTP endpoint is enabled.
+#http.enabled: false
+
+# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+#http.host: localhost
+
+# Port on which the HTTP endpoint will bind. Default is 5066.
+#http.port: 5066
+
+#============================= Process Security ================================
+
+# Enable or disable seccomp system call filtering on Linux. Default is enabled.
+#seccomp.enabled: true
diff --git a/journalbeat/journalbeat.yml b/journalbeat/journalbeat.yml
new file mode 100644
index 00000000000..359042a9289
--- /dev/null
+++ b/journalbeat/journalbeat.yml
@@ -0,0 +1,177 @@
+###################### Journalbeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The journalbeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/journalbeat/index.html
+
+# For more available modules and options, please see the journalbeat.reference.yml sample
+# configuration file.
+
+#=========================== Journalbeat inputs =============================
+
+journalbeat.inputs:
+  # Paths that should be crawled and fetched. Possible values are files and directories.
+  # When setting a directory, all journals under it are merged.
+  # When empty, journalbeat starts to read from the local journal.
+- paths: []
+
+  # The number of seconds to wait before trying to read again from journals.
+  #backoff: 1s
+  # The maximum number of seconds to wait before attempting to read again from journals.
+  #max_backoff: 60s
+
+  # Position to start reading from journal. Valid values: head, tail, cursor
+  seek: tail
+
+  # Exact matching for field values of events.
+  # Matching for nginx entries: "systemd.unit=nginx"
+  #include_matches: []
+
+  # Optional fields that you can specify to add additional information to the
+  # output. Fields can be scalar values, arrays, dictionaries, or any nested
+  # combination of these.
+  #fields:
+  #  env: staging
+
+
+#========================= Journalbeat global options ============================
+#journalbeat:
+  # Name of the registry file. If a relative path is used, it is considered relative to the
+  # data path.
+  #registry_file: registry
+
+  # The number of seconds to wait before trying to read again from journals.
+  #backoff: 1s
+  # The maximum number of seconds to wait before attempting to read again from journals.
+  #max_backoff: 60s
+
+  # Position to start reading from all journals. Possible values: head, tail, cursor
+  #seek: head
+
+  # Exact matching for field values of events.
+  # Matching for nginx entries: "systemd.unit=nginx"
+  #matches: []
+
+#================================ General =====================================
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published.
+#tags: ["service-X", "web-tier"]
+
+# Optional fields that you can specify to add additional information to the
+# output.
+#fields:
+#  env: staging
+
+
+#============================== Dashboards =====================================
+# These settings control loading the sample dashboards to the Kibana index. Loading
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here, or by using the `-setup` CLI flag or the `setup` command.
+#setup.dashboards.enabled: false
+
+# The URL from where to download the dashboards archive. By default this URL
+# has a value which is computed based on the Beat name and version. For released
+# versions, this URL points to the dashboard archive on the artifacts.elastic.co
+# website.
+#setup.dashboards.url:
+
+#============================== Kibana =====================================
+
+# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
+# This requires a Kibana endpoint configuration.
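+# As a hedged, commented sketch (the host below is an example value, not a
+# default), pointing journalbeat at a remote Kibana could look like:
+#
+#  setup.kibana:
+#    host: "kibana.example.com:5601"
+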
+setup.kibana:
+
+  # Kibana Host
+  # Scheme and port can be left out and will be set to the default (http and 5601)
+  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
+  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
+  #host: "localhost:5601"
+
+  # Kibana Space ID
+  # ID of the Kibana Space into which the dashboards should be loaded. By default,
+  # the Default Space will be used.
+  #space.id:
+
+#============================= Elastic Cloud ==================================
+
+# These settings simplify using journalbeat with the Elastic Cloud (https://cloud.elastic.co/).
+
+# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
+# `setup.kibana.host` options.
+# You can find the `cloud.id` in the Elastic Cloud web UI.
+#cloud.id:
+
+# The cloud.auth setting overwrites the `output.elasticsearch.username` and
+# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
+#cloud.auth:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: ["localhost:9200"]
+
+  # Optional protocol and basic auth credentials.
+  #protocol: "https"
+  #username: "elastic"
+  #password: "changeme"
+
+#----------------------------- Logstash output --------------------------------
+#output.logstash:
+  # The Logstash hosts
+  #hosts: ["localhost:5044"]
+
+  # Optional SSL. By default it is off.
+  # List of root certificates for HTTPS server verifications
+  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
+
+  # Certificate for SSL client authentication
+  #ssl.certificate: "/etc/pki/client/cert.pem"
+
+  # Client Certificate Key
+  #ssl.key: "/etc/pki/client/cert.key"
+
+#================================ Processors =====================================
+
+# Configure processors to enhance or manipulate events generated by the beat.
+
+processors:
+  - add_host_metadata: ~
+  - add_cloud_metadata: ~
+
+#================================ Logging =====================================
+
+# Sets log level. The default log level is info.
+# Available log levels are: error, warning, info, debug
+#logging.level: debug
+
+# At debug level, you can selectively enable logging only for some components.
+# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# "publish", "service".
+#logging.selectors: ["*"]
+
+#============================== Xpack Monitoring ===============================
+# journalbeat can export internal metrics to a central Elasticsearch monitoring
+# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
+# reporting is disabled by default.
+
+# Set to true to enable the monitoring reporter.
+#xpack.monitoring.enabled: false
+
+# Uncomment to send the metrics to Elasticsearch. Most settings from the
+# Elasticsearch output are accepted here as well. Any setting that is not set is
+# automatically inherited from the Elasticsearch output configuration, so if you
+# have the Elasticsearch output configured, you can simply uncomment the
+# following line.
+#xpack.monitoring.elasticsearch:
diff --git a/journalbeat/magefile.go b/journalbeat/magefile.go
new file mode 100644
index 00000000000..d8566ab9c74
--- /dev/null
+++ b/journalbeat/magefile.go
@@ -0,0 +1,228 @@
+// Licensed to Elasticsearch B.V.
under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// +build mage
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/dev-tools/mage"
+)
+
+func init() {
+	mage.BeatDescription = "Journalbeat ships systemd journal entries to Elasticsearch or Logstash."
+
+	mage.Platforms = mage.Platforms.Filter("linux !linux/ppc64 !linux/mips64")
+}
+
+const (
+	libsystemdDevPkgName = "libsystemd-dev"
+)
+
+// Build builds the Beat binary.
+func Build() error {
+	return mage.Build(mage.DefaultBuildArgs())
+}
+
+// GolangCrossBuild builds the Beat binary inside of the golang-builder.
+// Do not use directly, use crossBuild instead.
+func GolangCrossBuild() error {
+	if d, ok := deps[mage.Platform.Name]; ok {
+		mg.Deps(d)
+	}
+	return mage.GolangCrossBuild(mage.DefaultGolangCrossBuildArgs())
+}
+
+// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon).
+func BuildGoDaemon() error {
+	return mage.BuildGoDaemon()
+}
+
+// CrossBuild cross-builds the beat for all target platforms.
+func CrossBuild() error {
+	return mage.CrossBuild(mage.ImageSelector(selectImage))
+}
+
+// CrossBuildXPack cross-builds the beat with XPack for all target platforms.
+func CrossBuildXPack() error {
+	return mage.CrossBuildXPack(mage.ImageSelector(selectImage))
+}
+
+// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker.
+func CrossBuildGoDaemon() error {
+	return mage.CrossBuildGoDaemon(mage.ImageSelector(selectImage))
+}
+
+// Clean cleans all generated files and build artifacts.
+func Clean() error {
+	return mage.Clean()
+}
+
+// Package packages the Beat for distribution.
+// Use SNAPSHOT=true to build snapshots.
+// Use PLATFORMS to control the target platforms.
+func Package() {
+	start := time.Now()
+	defer func() { fmt.Println("package ran for", time.Since(start)) }()
+
+	mage.UseElasticBeatPackaging()
+
+	mg.Deps(Update)
+	mg.Deps(CrossBuild, CrossBuildXPack, CrossBuildGoDaemon)
+	mg.SerialDeps(mage.Package, TestPackages)
+}
+
+// TestPackages tests the generated packages (i.e. file modes, owners, groups).
+func TestPackages() error {
+	return mage.TestPackages()
+}
+
+// Update updates the generated files (aka make update).
+func Update() error {
+	return sh.Run("make", "update")
+}
+
+// Fields generates a fields.yml for the Beat.
+func Fields() error {
+	return mage.GenerateFieldsYAML()
+}
+
+// GoTestUnit executes the Go unit tests.
+// Use TEST_COVERAGE=true to enable code coverage profiling.
+// Use RACE_DETECTOR=true to enable the race detector.
+func GoTestUnit(ctx context.Context) error {
+	return mage.GoTest(ctx, mage.DefaultGoTestUnitArgs())
+}
+
+// GoTestIntegration executes the Go integration tests.
+// Use TEST_COVERAGE=true to enable code coverage profiling. +// Use RACE_DETECTOR=true to enable the race detector. +func GoTestIntegration(ctx context.Context) error { + return mage.GoTest(ctx, mage.DefaultGoTestIntegrationArgs()) +} + +// ----------------------------------------------------------------------------- +// Customizations specific to Journalbeat. +// - Install required headers on builders for different architectures. + +var ( + deps = map[string]func() error{ + "linux/386": installLinux386, + "linux/amd64": installLinuxAMD64, + "linux/arm64": installLinuxARM64, + "linux/armv5": installLinuxARMLE, + "linux/armv6": installLinuxARMLE, + "linux/armv7": installLinuxARMHF, + "linux/mips": installLinuxMIPS, + "linux/mipsle": installLinuxMIPSLE, + "linux/mips64le": installLinuxMIPS64LE, + "linux/ppc64le": installLinuxPPC64LE, + "linux/s390x": installLinuxS390X, + + // No deb packages + //"linux/ppc64": installLinuxPpc64, + //"linux/mips64": installLinuxMips64, + } +) + +func installLinuxAMD64() error { + return installDependencies(libsystemdDevPkgName, "") +} + +func installLinuxARM64() error { + return installDependencies(libsystemdDevPkgName+":arm64", "arm64") +} + +func installLinuxARMHF() error { + return installDependencies(libsystemdDevPkgName+":armhf", "armhf") +} + +func installLinuxARMLE() error { + return installDependencies(libsystemdDevPkgName+":armel", "armel") +} + +func installLinux386() error { + return installDependencies(libsystemdDevPkgName+":i386", "i386") +} + +func installLinuxMIPS() error { + return installDependencies(libsystemdDevPkgName+":mips", "mips") +} + +func installLinuxMIPS64LE() error { + return installDependencies(libsystemdDevPkgName+":mips64el", "mips64el") +} + +func installLinuxMIPSLE() error { + return installDependencies(libsystemdDevPkgName+":mipsel", "mipsel") +} + +func installLinuxPPC64LE() error { + return installDependencies(libsystemdDevPkgName+":ppc64el", "ppc64el") +} + +func installLinuxS390X() error { + return installDependencies(libsystemdDevPkgName+":s390x", "s390x") +} + +func installDependencies(pkg, arch string) error { + if arch != "" { + err := sh.Run("dpkg", "--add-architecture", arch) + if err != nil { + return errors.Wrap(err, "error while adding architecture") + } + } + + if err := sh.Run("apt-get", "update"); err != nil { + return err + } + + return sh.Run("apt-get", "install", "-y", "--no-install-recommends", pkg) +} + +func selectImage(platform string) (string, error) { + tagSuffix := "main" + + switch { + case strings.HasPrefix(platform, "linux/arm"): + tagSuffix = "arm" + case strings.HasPrefix(platform, "linux/mips"): + tagSuffix = "mips" + case strings.HasPrefix(platform, "linux/ppc"): + tagSuffix = "ppc" + case platform == "linux/s390x": + tagSuffix = "s390x" + case strings.HasPrefix(platform, "linux"): + tagSuffix = "main-debian8" + } + + goVersion, err := mage.GoVersion() + if err != nil { + return "", err + } + + return mage.BeatsCrossBuildImage + ":" + goVersion + "-" + tagSuffix, nil +} diff --git a/journalbeat/main.go b/journalbeat/main.go new file mode 100644 index 00000000000..6bfbba776b2 --- /dev/null +++ b/journalbeat/main.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "os" + + "github.com/elastic/beats/journalbeat/cmd" +) + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/journalbeat/main_test.go b/journalbeat/main_test.go new file mode 100644 index 00000000000..c6eb420ed2c --- /dev/null +++ b/journalbeat/main_test.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +// This file is mandatory as otherwise the journalbeat.test binary is not generated correctly. + +import ( + "flag" + "testing" + + "github.com/elastic/beats/journalbeat/cmd" +) + +var systemTest *bool + +func init() { + systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") + + cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) +} + +// Test started when the test binary is started. Only calls main. +func TestSystem(t *testing.T) { + + if *systemTest { + main() + } +} diff --git a/journalbeat/reader/fields.go b/journalbeat/reader/fields.go new file mode 100644 index 00000000000..0744f6fda49 --- /dev/null +++ b/journalbeat/reader/fields.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
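+
+// As a hedged illustration of how the table below is consumed (this mirrors
+// Reader.toEvent and Reader.convertNamedField in journal.go; the "_PID" key
+// and the raw value are examples only):
+//
+//	if conv, ok := journaldEventFields["_PID"]; ok && !conv.dropped {
+//		var value interface{} = "1234"
+//		if conv.isInteger {
+//			value, _ = strconv.ParseInt("1234", 10, 64)
+//		}
+//		fields.Put(conv.name, value) // stored under "process.pid"
+//	}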
+
+package reader
+
+import "github.com/coreos/go-systemd/sdjournal"
+
+type fieldConversion struct {
+	name      string
+	isInteger bool
+	dropped   bool
+}
+
+var (
+	journaldEventFields = map[string]fieldConversion{
+		// provided by systemd journal
+		"COREDUMP_UNIT":            fieldConversion{"journald.coredump.unit", false, false},
+		"COREDUMP_USER_UNIT":       fieldConversion{"journald.coredump.user_unit", false, false},
+		"OBJECT_AUDIT_LOGINUID":    fieldConversion{"journald.object.audit.login_uid", true, false},
+		"OBJECT_AUDIT_SESSION":     fieldConversion{"journald.object.audit.session", true, false},
+		"OBJECT_CMDLINE":           fieldConversion{"journald.object.cmd", false, false},
+		"OBJECT_COMM":              fieldConversion{"journald.object.name", false, false},
+		"OBJECT_EXE":               fieldConversion{"journald.object.executable", false, false},
+		"OBJECT_GID":               fieldConversion{"journald.object.gid", true, false},
+		"OBJECT_PID":               fieldConversion{"journald.object.pid", true, false},
+		"OBJECT_SYSTEMD_OWNER_UID": fieldConversion{"journald.object.systemd.owner_uid", true, false},
+		"OBJECT_SYSTEMD_SESSION":   fieldConversion{"journald.object.systemd.session", false, false},
+		"OBJECT_SYSTEMD_UNIT":      fieldConversion{"journald.object.systemd.unit", false, false},
+		"OBJECT_SYSTEMD_USER_UNIT": fieldConversion{"journald.object.systemd.user_unit", false, false},
+		"OBJECT_UID":               fieldConversion{"journald.object.uid", true, false},
+		"_KERNEL_DEVICE":           fieldConversion{"journald.kernel.device", false, false},
+		"_KERNEL_SUBSYSTEM":        fieldConversion{"journald.kernel.subsystem", false, false},
+		"_SYSTEMD_INVOCATION_ID":   fieldConversion{"systemd.invocation_id", false, false},
+		"_SYSTEMD_USER_SLICE":      fieldConversion{"systemd.user_slice", false, false},
+		"_UDEV_DEVLINK":            fieldConversion{"journald.kernel.device_symlinks", false, false}, // TODO aggregate multiple elements
+		"_UDEV_DEVNODE":            fieldConversion{"journald.kernel.device_node_path", false, false},
+		"_UDEV_SYSNAME":            fieldConversion{"journald.kernel.device_name", false, false},
+		sdjournal.SD_JOURNAL_FIELD_AUDIT_LOGINUID:  fieldConversion{"process.audit.login_uid", true, false},
+		sdjournal.SD_JOURNAL_FIELD_AUDIT_SESSION:   fieldConversion{"process.audit.session", false, false},
+		sdjournal.SD_JOURNAL_FIELD_BOOT_ID:         fieldConversion{"host.boot_id", false, false},
+		sdjournal.SD_JOURNAL_FIELD_CAP_EFFECTIVE:   fieldConversion{"process.capabilities", false, false},
+		sdjournal.SD_JOURNAL_FIELD_CMDLINE:         fieldConversion{"process.cmd", false, false},
+		sdjournal.SD_JOURNAL_FIELD_CODE_FILE:       fieldConversion{"journald.code.file", false, false},
+		sdjournal.SD_JOURNAL_FIELD_CODE_FUNC:       fieldConversion{"journald.code.func", false, false},
+		sdjournal.SD_JOURNAL_FIELD_CODE_LINE:       fieldConversion{"journald.code.line", true, false},
+		sdjournal.SD_JOURNAL_FIELD_COMM:            fieldConversion{"process.name", false, false},
+		sdjournal.SD_JOURNAL_FIELD_EXE:             fieldConversion{"process.executable", false, false},
+		sdjournal.SD_JOURNAL_FIELD_GID:             fieldConversion{"process.gid", true, false},
+		sdjournal.SD_JOURNAL_FIELD_HOSTNAME:        fieldConversion{"host.name", false, false},
+		sdjournal.SD_JOURNAL_FIELD_MACHINE_ID:      fieldConversion{"host.id", false, false},
+		sdjournal.SD_JOURNAL_FIELD_MESSAGE:         fieldConversion{"message", false, false},
+		sdjournal.SD_JOURNAL_FIELD_PID:             fieldConversion{"process.pid", true, false},
+		sdjournal.SD_JOURNAL_FIELD_PRIORITY:        fieldConversion{"syslog.priority", true, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSLOG_FACILITY: fieldConversion{"syslog.facility", true, false},
		sdjournal.SD_JOURNAL_FIELD_SYSLOG_IDENTIFIER: fieldConversion{"syslog.identifier", false, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSLOG_PID:        fieldConversion{"syslog.pid", true, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSTEMD_CGROUP:    fieldConversion{"systemd.cgroup", false, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSTEMD_OWNER_UID: fieldConversion{"systemd.owner_uid", true, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSTEMD_SESSION:   fieldConversion{"systemd.session", false, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSTEMD_SLICE:     fieldConversion{"systemd.slice", false, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSTEMD_UNIT:      fieldConversion{"systemd.unit", false, false},
+		sdjournal.SD_JOURNAL_FIELD_SYSTEMD_USER_UNIT: fieldConversion{"systemd.user_unit", false, false},
+		sdjournal.SD_JOURNAL_FIELD_TRANSPORT:         fieldConversion{"systemd.transport", false, false},
+		sdjournal.SD_JOURNAL_FIELD_UID:               fieldConversion{"process.uid", true, false},
+
+		// docker journald fields from: https://docs.docker.com/config/containers/logging/journald/
+		"CONTAINER_ID":              fieldConversion{"container.id_truncated", false, false},
+		"CONTAINER_ID_FULL":         fieldConversion{"container.id", false, false},
+		"CONTAINER_NAME":            fieldConversion{"container.name", false, false},
+		"CONTAINER_TAG":             fieldConversion{"container.image.tag", false, false},
+		"CONTAINER_PARTIAL_MESSAGE": fieldConversion{"container.partial", false, false},
+
+		// dropped fields
+		sdjournal.SD_JOURNAL_FIELD_MONOTONIC_TIMESTAMP:       fieldConversion{"", false, true}, // saved in the registry
+		sdjournal.SD_JOURNAL_FIELD_SOURCE_REALTIME_TIMESTAMP: fieldConversion{"", false, true}, // saved in the registry
+		sdjournal.SD_JOURNAL_FIELD_CURSOR:                    fieldConversion{"", false, true}, // saved in the registry
+		"_SOURCE_MONOTONIC_TIMESTAMP":                        fieldConversion{"", false, true}, // received timestamp stored in @timestamp
+	}
+)
diff --git a/journalbeat/reader/journal.go b/journalbeat/reader/journal.go
new file mode 100644
index 00000000000..756b2087ec2
--- /dev/null
+++ b/journalbeat/reader/journal.go
@@ -0,0 +1,279 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package reader
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/coreos/go-systemd/sdjournal"
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/journalbeat/checkpoint"
+	"github.com/elastic/beats/journalbeat/cmd/instance"
+	"github.com/elastic/beats/libbeat/beat"
+	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/logp"
+)
+
+const (
+	// LocalSystemJournalID is the ID of the local system journal.
+	LocalSystemJournalID = "LOCAL_SYSTEM_JOURNAL"
+)
+
+// Config stores the options of a reader.
+type Config struct {
+	// Path is the path to the journal file.
+	Path string
+	// Seek specifies the seeking strategy.
+	// Possible values: head, tail, cursor.
+	Seek string
+	// MaxBackoff is the limit of the backoff time.
+	MaxBackoff time.Duration
+	// Backoff is the current interval to wait before
+	// attempting to read again from the journal.
+	Backoff time.Duration
+	// Matches stores the key-value pairs used to match entries.
+	Matches []string
+}
+
+// Reader reads entries from journal(s).
+type Reader struct {
+	journal *sdjournal.Journal
+	config  Config
+	done    chan struct{}
+	logger  *logp.Logger
+	backoff *common.Backoff
+}
+
+// New creates a new journal reader and moves the read position to the
+// configured location.
+func New(c Config, done chan struct{}, state checkpoint.JournalState, logger *logp.Logger) (*Reader, error) {
+	f, err := os.Stat(c.Path)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to open file")
+	}
+
+	var j *sdjournal.Journal
+	if f.IsDir() {
+		j, err = sdjournal.NewJournalFromDir(c.Path)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to open journal directory")
+		}
+	} else {
+		j, err = sdjournal.NewJournalFromFiles(c.Path)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to open journal file")
+		}
+	}
+
+	l := logger.With("path", c.Path)
+	l.Debug("New journal is opened for reading")
+
+	return newReader(l, done, c, j, state)
+}
+
+// NewLocal creates a reader to read from the local journal and moves the
+// read position to the configured location.
+func NewLocal(c Config, done chan struct{}, state checkpoint.JournalState, logger *logp.Logger) (*Reader, error) {
+	j, err := sdjournal.NewJournal()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to open local journal")
+	}
+
+	l := logger.With("path", "local")
+	l.Debug("New local journal is opened for reading")
+
+	return newReader(l, done, c, j, state)
+}
+
+func newReader(logger *logp.Logger, done chan struct{}, c Config, journal *sdjournal.Journal, state checkpoint.JournalState) (*Reader, error) {
+	err := setupMatches(journal, c.Matches)
+	if err != nil {
+		return nil, err
+	}
+
+	r := &Reader{
+		journal: journal,
+		config:  c,
+		done:    done,
+		logger:  logger,
+		backoff: common.NewBackoff(done, c.Backoff, c.MaxBackoff),
+	}
+	r.seek(state.Cursor)
+
+	instance.AddJournalToMonitor(c.Path, journal)
+
+	return r, nil
+}
+
+func setupMatches(j *sdjournal.Journal, matches []string) error {
+	for _, m := range matches {
+		elems := strings.Split(m, "=")
+		if len(elems) != 2 {
+			return fmt.Errorf("invalid match format: %s", m)
+		}
+
+		var p string
+		for journalKey, eventField := range journaldEventFields {
+			if elems[0] == eventField.name {
+				p = journalKey + "=" + elems[1]
+			}
+		}
+
+		// pass custom fields as is
+		if p == "" {
+			p = m
+		}
+
+		logp.Debug("journal", "Added matcher expression: %s", p)
+
+		err := j.AddMatch(p)
+		if err != nil {
+			return fmt.Errorf("error adding match to journal: %v", err)
+		}
+
+		err = j.AddDisjunction()
+		if err != nil {
+			return fmt.Errorf("error adding disjunction to journal: %v", err)
+		}
+	}
+	return nil
+}
+
+// seek seeks to the position determined by the configuration and cursor state.
+func (r *Reader) seek(cursor string) {
+	if r.config.Seek == "cursor" {
+		if cursor == "" {
+			r.journal.SeekHead()
+			r.logger.Debug("Seeking method set to cursor, but no state is saved for reader. Starting to read from the beginning")
+			return
+		}
+		r.journal.SeekCursor(cursor)
+		_, err := r.journal.Next()
+		if err != nil {
+			r.logger.Error("Error while seeking to cursor")
+		}
+		r.logger.Debug("Seeked to position defined in cursor")
+	} else if r.config.Seek == "tail" {
+		r.journal.SeekTail()
+		r.logger.Debug("Tailing the journal file")
+	} else if r.config.Seek == "head" {
+		r.journal.SeekHead()
+		r.logger.Debug("Reading from the beginning of the journal file")
+	}
+}
+
+// Next waits until a new event shows up and returns it.
+// It blocks until an event is returned or an error occurs.
+func (r *Reader) Next() (*beat.Event, error) {
+	for {
+		select {
+		case <-r.done:
+			return nil, nil
+		default:
+			event, err := r.readEvent()
+			if err != nil {
+				return nil, err
+			}
+
+			if event == nil {
+				r.backoff.Wait()
+				continue
+			}
+
+			r.backoff.Reset()
+			return event, nil
+		}
+	}
+}
+
+func (r *Reader) readEvent() (*beat.Event, error) {
+	n, err := r.journal.Next()
+	if err != nil && err != io.EOF {
+		return nil, err
+	}
+
+	if n == 1 {
+		entry, err := r.journal.GetEntry()
+		if err != nil {
+			return nil, err
+		}
+		event := r.toEvent(entry)
+		return event, nil
+	}
+	return nil, nil
+}
+
+// toEvent creates a beat.Event from journal entries.
+func (r *Reader) toEvent(entry *sdjournal.JournalEntry) *beat.Event {
+	fields := common.MapStr{}
+	custom := common.MapStr{}
+
+	for entryKey, v := range entry.Fields {
+		if fieldConversionInfo, ok := journaldEventFields[entryKey]; !ok {
+			normalized := strings.ToLower(strings.TrimLeft(entryKey, "_"))
+			custom.Put(normalized, v)
+		} else if !fieldConversionInfo.dropped {
+			value := r.convertNamedField(fieldConversionInfo, v)
+			fields.Put(fieldConversionInfo.name, value)
+		}
+	}
+
+	if len(custom) != 0 {
+		fields["custom"] = custom
+	}
+
+	state := checkpoint.JournalState{
+		Path:               r.config.Path,
+		Cursor:             entry.Cursor,
+		RealtimeTimestamp:  entry.RealtimeTimestamp,
+		MonotonicTimestamp: entry.MonotonicTimestamp,
+	}
+
+	fields["read_timestamp"] = time.Now()
+	receivedByJournal := time.Unix(0, int64(entry.RealtimeTimestamp)*1000)
+
+	event := beat.Event{
+		Timestamp: receivedByJournal,
+		Fields:    fields,
+		Private:   state,
+	}
+	return &event
+}
+
+func (r *Reader) convertNamedField(fc fieldConversion, value string) interface{} {
+	if fc.isInteger {
+		v, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			r.logger.Debugf("Failed to convert field: %s \"%v\" to int: %v", fc.name, value, err)
+			return value
+		}
+		return v
+	}
+	return value
+}
+
+// Close closes the underlying journal reader.
+func (r *Reader) Close() {
+	instance.StopMonitoringJournal(r.config.Path)
+	r.journal.Close()
+}
diff --git a/journalbeat/reader/journal_test.go b/journalbeat/reader/journal_test.go
new file mode 100644
index 00000000000..2e7da74aa9a
--- /dev/null
+++ b/journalbeat/reader/journal_test.go
@@ -0,0 +1,123 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package reader + +import ( + "reflect" + "testing" + + "github.com/coreos/go-systemd/sdjournal" + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/journalbeat/checkpoint" + "github.com/elastic/beats/journalbeat/cmd/instance" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/logp" +) + +type ToEventTestCase struct { + entry sdjournal.JournalEntry + expectedFields common.MapStr +} + +type SetupMatchesTestCase struct { + matches []string + expectError bool +} + +func TestToEvent(t *testing.T) { + tests := []ToEventTestCase{ + // field name from fields.go + ToEventTestCase{ + entry: sdjournal.JournalEntry{ + Fields: map[string]string{ + sdjournal.SD_JOURNAL_FIELD_BOOT_ID: "123456", + }, + }, + expectedFields: common.MapStr{ + "host": common.MapStr{ + "boot_id": "123456", + }, + }, + }, + // custom field + ToEventTestCase{ + entry: sdjournal.JournalEntry{ + Fields: map[string]string{ + "my_custom_field": "value", + }, + }, + expectedFields: common.MapStr{ + "custom": common.MapStr{ + "my_custom_field": "value", + }, + }, + }, + // dropped field + ToEventTestCase{ + entry: sdjournal.JournalEntry{ + Fields: map[string]string{ + "_SOURCE_MONOTONIC_TIMESTAMP": "value", + }, + }, + expectedFields: common.MapStr{}, + }, + } + + instance.SetupJournalMetrics() + r, err := NewLocal(Config{Path: "dummy.journal"}, nil, checkpoint.JournalState{}, logp.NewLogger("test")) + if err != nil { + t.Fatalf("error creating test journal: %v", err) + } + for _, test := range tests { + event := r.toEvent(&test.entry) + delete(event.Fields, "read_timestamp") + assert.True(t, reflect.DeepEqual(event.Fields, test.expectedFields)) + } +} + +func TestSetupMatches(t *testing.T) { + tests := []SetupMatchesTestCase{ + // correct filter expression + SetupMatchesTestCase{ + matches: []string{"systemd.unit=nginx"}, + expectError: false, + }, + // custom field + SetupMatchesTestCase{ + matches: []string{"_MY_CUSTOM_FIELD=value"}, + expectError: false, + }, + // incorrect separator + SetupMatchesTestCase{ + matches: []string{"systemd.unit~nginx"}, + expectError: true, + }, + } + journal, err := sdjournal.NewJournal() + if err != nil { + t.Fatalf("error while creating test journal: %v", err) + } + + for _, test := range tests { + err = setupMatches(journal, test.matches) + if err != nil && !test.expectError { + t.Errorf("unexpected outcome of setupMatches: error: '%v', expected error: %v", err, test.expectError) + } + } +} diff --git a/journalbeat/tests/system/config/journalbeat.yml.j2 b/journalbeat/tests/system/config/journalbeat.yml.j2 new file mode 100644 index 00000000000..ca8cc1d862c --- /dev/null +++ b/journalbeat/tests/system/config/journalbeat.yml.j2 @@ -0,0 +1,82 @@ +################### Beat Configuration ######################### +journalbeat.inputs: +- paths: [{{ journal_path }}] + seek: {{ seek_method }} + matches: [{{ matches }}] + +journalbeat.registry: {{ registry_file }} + +############################# Output ########################################## + +# Configure what outputs to use when sending the data collected by the beat. 
+# You can enable one or multiple outputs by setting the enabled option to true.
+output:
+
+  ### File as output
+  file:
+    # Enabling file output
+    enabled: true
+
+    # Path to the directory where to save the generated files. The option is mandatory.
+    path: {{ output_file_path|default(beat.working_dir + "/output") }}
+
+
+    # Name of the generated files. The default is `journalbeat` and it generates
+    # files: `journalbeat`, `journalbeat.1`, `journalbeat.2`, etc.
+    filename: {{ output_file_filename|default("journalbeat") }}
+
+    # Maximum size in kilobytes of each file. When this size is reached, the files are
+    # rotated. The default value is 10 MB.
+    #rotate_every_kb: 10000
+
+    # Maximum number of files under path. When this number of files is reached, the
+    # oldest file is deleted and the rest are shifted from last to first. The default
+    # is 7 files.
+    #number_of_files: 7
+
+
+
+############################# Beat #########################################
+
+# The name of the shipper that publishes the network data. It can be used to group
+# all the transactions sent by a single shipper in the web interface.
+# If this option is not defined, the hostname is used.
+#name:
+
+# The tags of the shipper are included in their own field with each
+# transaction published. Tags make it easy to group servers by different
+# logical properties.
+#tags: ["service-X", "web-tier"]
+
+
+
+############################# Logging #########################################
+
+#logging:
+  # Send all logging output to syslog. On Windows default is false, otherwise
+  # default is true.
+  #to_syslog: true
+
+  # Write all logging output to files. Beats automatically rotate files if the
+  # configurable limit is reached.
+  #to_files: false
+
+  # Enable debug output for selected components.
+  #selectors: []
+
+  # Set log level
+  #level: error
+
+  #files:
+    # The directory where the log files will be written to.
+    #path: /var/log/journalbeat
+
+    # The name of the files where the logs are written to.
+    #name: journalbeat
+
+    # Configure log file size limit. If limit is reached, log file will be
+    # automatically rotated
+    #rotateeverybytes: 10485760 # = 10MB
+
+    # Number of rotated log files to keep. Oldest files will be deleted first.
+    #keepfiles: 7
diff --git a/journalbeat/tests/system/input/test.journal b/journalbeat/tests/system/input/test.journal
new file mode 100644
index 00000000000..887d4917905
Binary files /dev/null and b/journalbeat/tests/system/input/test.journal differ
diff --git a/journalbeat/tests/system/input/test.registry b/journalbeat/tests/system/input/test.registry
new file mode 100644
index 00000000000..5c6680edb42
--- /dev/null
+++ b/journalbeat/tests/system/input/test.registry
@@ -0,0 +1,6 @@
+update_time: 2018-09-11T10:06:50.895829905Z
+journal_entries:
+- path: /home/n/go/src/github.com/elastic/beats/journalbeat/tests/system/input/test.journal
+  cursor: s=7d22fd7aa0c7482d88c303f47d5f32dc;i=2fcb;b=902dc834f07d4f41ade064f6b2ef8b4f;m=1bf0ff5c6d;t=55913a25fe765;x=c7e6480eec30822b
+  realtime_timestamp: 1505315746998117
+  monotonic_timestamp: 120007384173
diff --git a/journalbeat/tests/system/journalbeat.py b/journalbeat/tests/system/journalbeat.py
new file mode 100644
index 00000000000..11381395e29
--- /dev/null
+++ b/journalbeat/tests/system/journalbeat.py
@@ -0,0 +1,13 @@
+import os
+import sys
+sys.path.append(os.path.join(os.path.dirname(__file__), '../../../libbeat/tests/system'))
+from beat.beat import TestCase
+
+
+class BaseTest(TestCase):
+
+    @classmethod
+    def setUpClass(self):
+        self.beat_name = "journalbeat"
+        self.beat_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
+        super(BaseTest, self).setUpClass()
diff --git a/journalbeat/tests/system/test_base.py b/journalbeat/tests/system/test_base.py
new file mode 100644
index 00000000000..be68e6c08c3
--- /dev/null
+++ b/journalbeat/tests/system/test_base.py
@@ -0,0 +1,147 @@
+from journalbeat import BaseTest
+
+import os
+import sys
+import unittest
+import time
+
+
+class Test(BaseTest):
+
+    @unittest.skipUnless(sys.platform.startswith("linux"), "Journald only on Linux")
+    def test_start_with_local_journal(self):
+        """
+        Journalbeat is able to start with the local journal.
+        """
+
+        self.render_config_template(
+            path=os.path.abspath(self.working_dir) + "/log/*"
+        )
+        journalbeat_proc = self.start_beat()
+
+        self.wait_until(lambda: self.log_contains(
+            "journalbeat is running"), max_timeout=10)
+
+        exit_code = journalbeat_proc.kill_and_wait()
+        assert exit_code == 0
+
+    @unittest.skipUnless(sys.platform.startswith("linux"), "Journald only on Linux")
+    def test_start_with_journal_directory(self):
+        """
+        Journalbeat is able to open a directory of journal files and start tailing them.
+        """
+
+        self.render_config_template(
+            journal_path=self.beat_path + "/tests/system/input/",
+            path=os.path.abspath(self.working_dir) + "/log/*"
+        )
+        journalbeat_proc = self.start_beat()
+
+        required_log_snippets = [
+            # journalbeat can be started
+            "journalbeat is running",
+            # journalbeat tails the journal files in the directory
+            "Tailing the journal file",
+        ]
+        for snippet in required_log_snippets:
+            self.wait_until(lambda: self.log_contains(snippet),
+                            name="Line in '{}' Journalbeat log".format(snippet))
+
+        exit_code = journalbeat_proc.kill_and_wait()
+        assert exit_code == 0
+
+    @unittest.skipUnless(sys.platform.startswith("linux"), "Journald only on Linux")
+    def test_start_with_selected_journal_file(self):
+        """
+        Journalbeat is able to open a journal file and start to read it from the beginning.
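+        The journal file under tests/system/input contains prerecorded
+        entries; seek_method="head" makes Journalbeat read it from the
+        first entry.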
+ """ + + self.render_config_template( + journal_path=self.beat_path + "/tests/system/input/test.journal", + seek_method="head", + path=os.path.abspath(self.working_dir) + "/log/*" + ) + journalbeat_proc = self.start_beat() + + required_log_snippets = [ + # journalbeat can be started + "journalbeat is running", + # journalbeat can seek to the position defined in the cursor + "Reading from the beginning of the journal file", + # message can be read from test journal + "\"message\": \"thinkpad_acpi: unhandled HKEY event 0x60b0\"", + ] + for snippet in required_log_snippets: + self.wait_until(lambda: self.log_contains(snippet), + name="Line in '{}' Journalbeat log".format(snippet)) + + exit_code = journalbeat_proc.kill_and_wait() + assert exit_code == 0 + + @unittest.skipUnless(sys.platform.startswith("linux"), "Journald only on Linux") + def test_read_events_with_existing_registry(self): + """ + Journalbeat is able to follow reading a from a journal with an existing registry file. + """ + + self.render_config_template( + journal_path=self.beat_path + "/tests/system/input/test.journal", + seek_method="cursor", + registry_file=self.beat_path + "/tests/system/input/test.registry", + path=os.path.abspath(self.working_dir) + "/log/*", + ) + journalbeat_proc = self.start_beat() + + required_log_snippets = [ + # journalbeat can be started + "journalbeat is running", + # journalbeat can seek to the position defined in the cursor + "Seeked to position defined in cursor", + # message can be read from test journal + "please report the conditions when this event happened to", + # only one event is read and published + "journalbeat successfully published 1 events", + ] + for snippet in required_log_snippets: + self.wait_until(lambda: self.log_contains(snippet), + name="Line in '{}' Journalbeat log".format(snippet)) + + exit_code = journalbeat_proc.kill_and_wait() + assert exit_code == 0 + + @unittest.skipUnless(sys.platform.startswith("linux"), "Journald only on Linux") + def test_read_events_with_existing_registry(self): + """ + Journalbeat is able to pass matchers to the journal reader and read filtered messages. + """ + + self.render_config_template( + journal_path=self.beat_path + "/tests/system/input/test.journal", + seek_method="head", + matches="syslog.priority=5", + path=os.path.abspath(self.working_dir) + "/log/*", + ) + journalbeat_proc = self.start_beat() + + required_log_snippets = [ + # journalbeat can be started + "journalbeat is running", + # journalbeat can seek to the position defined in the cursor + "Added matcher expression", + # message can be read from test journal + "unhandled HKEY event 0x60b0", + "please report the conditions when this event happened to", + "unhandled HKEY event 0x60b1", + # Four events with priority 5 is publised + "journalbeat successfully published 4 events", + ] + for snippet in required_log_snippets: + self.wait_until(lambda: self.log_contains(snippet), + name="Line in '{}' Journalbeat log".format(snippet)) + + exit_code = journalbeat_proc.kill_and_wait() + assert exit_code == 0 + + +if __name__ == '__main__': + unittest.main() diff --git a/libbeat/docs/shared-configuring.asciidoc b/libbeat/docs/shared-configuring.asciidoc index d1d2c9428c0..c4008c4dc7f 100644 --- a/libbeat/docs/shared-configuring.asciidoc +++ b/libbeat/docs/shared-configuring.asciidoc @@ -1,8 +1,15 @@ +ifndef::no-docker[] To configure {beatname_uc}, you edit the configuration file. For rpm and deb, you'll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+. 
 Under Docker, it's located at +/usr/share/{beatname_lc}/{beatname_lc}.yml+. For mac and win,
-look in the archive that you just extracted.
+look in the archive that you just extracted.
+endif::[]
+ifdef::no-docker[]
+To configure {beatname_uc}, you edit the configuration file. For rpm and deb,
+you'll find the configuration file at +/etc/{beatname_lc}/{beatname_lc}.yml+.
+For mac and win, look in the archive that you just extracted.
+endif::[]
 
 ifeval::["{beatname_lc}"!="apm-server"]
 There’s also a full example configuration file called
 +{beatname_lc}.reference.yml+ that shows all non-deprecated options.
diff --git a/libbeat/docs/shared-download-and-install.asciidoc b/libbeat/docs/shared-download-and-install.asciidoc
index fe719f64d2c..0e3d608b9ec 100644
--- a/libbeat/docs/shared-download-and-install.asciidoc
+++ b/libbeat/docs/shared-download-and-install.asciidoc
@@ -2,10 +2,19 @@
 *Before you begin*: If you haven't installed the {stack}, do that now. See
 {stack-gs}/get-started-elastic-stack.html[Getting started with the {stack}].
 
+ifndef::no-docker[]
 To download and install {beatname_uc}, use the commands that work with your
 system (<<deb, deb>> for Debian/Ubuntu, <<rpm, rpm>> for Redhat/Centos/Fedora,
 <<mac, mac>> for OS X, <<docker, docker>> for any Docker platform, and
 <<win, win>> for Windows).
+endif::[]
+
+ifdef::no-docker[]
+To download and install {beatname_uc}, use the commands that work with your system
+(<<deb, deb>> for Debian/Ubuntu, <<rpm, rpm>> for Redhat/Centos/Fedora,
+<<mac, mac>> for OS X, and <<win, win>> for Windows).
+endif::[]
+
 [NOTE]
 ==================================================
diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile
index f04ee8858c7..941ba3d5a67 100755
--- a/libbeat/scripts/Makefile
+++ b/libbeat/scripts/Makefile
@@ -107,9 +107,11 @@ ${BEAT_NAME}.test: $(GOFILES_ALL)
 .PHONY: crosscompile
 crosscompile: ## @build Cross-compile beat for the OS'es specified in GOX_OS variable. The binaries are placed in the build/bin directory.
 crosscompile: $(GOFILES)
+ifneq ($(shell [[ $(BEAT_NAME) == journalbeat ]] && echo true ),true)
	go get github.com/mitchellh/gox
	mkdir -p ${BUILD_DIR}/bin
	gox -output="${BUILD_DIR}/bin/{{.Dir}}-{{.OS}}-{{.Arch}}" -os="$(strip $(GOX_OS))" -osarch="$(strip $(GOX_OSARCH))" ${GOX_FLAGS}
+endif
 
 .PHONY: check
 check: check-headers python-env prepare-tests ## @build Checks project and source code if everything is according to standard
diff --git a/magefile.go b/magefile.go
index bd6d744afcb..6b228a34da2 100644
--- a/magefile.go
+++ b/magefile.go
@@ -31,6 +31,7 @@ var (
 		"auditbeat",
 		"filebeat",
 		"heartbeat",
+		"journalbeat",
 		"metricbeat",
 		"packetbeat",
 		"winlogbeat",
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 00000000000..37ec93a14fd
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 00000000000..23a0ada2fbb --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/functions.go b/vendor/github.com/coreos/go-systemd/sdjournal/functions.go new file mode 100644 index 00000000000..e132369c127 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/sdjournal/functions.go @@ -0,0 +1,66 @@ +// Copyright 2015 RedHat, Inc. +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdjournal + +import ( + "github.com/coreos/pkg/dlopen" + "sync" + "unsafe" +) + +var ( + // lazy initialized + libsystemdHandle *dlopen.LibHandle + + libsystemdMutex = &sync.Mutex{} + libsystemdFunctions = map[string]unsafe.Pointer{} + libsystemdNames = []string{ + // systemd < 209 + "libsystemd-journal.so.0", + "libsystemd-journal.so", + + // systemd >= 209 merged libsystemd-journal into libsystemd proper + "libsystemd.so.0", + "libsystemd.so", + } +) + +func getFunction(name string) (unsafe.Pointer, error) { + libsystemdMutex.Lock() + defer libsystemdMutex.Unlock() + + if libsystemdHandle == nil { + h, err := dlopen.GetHandle(libsystemdNames) + if err != nil { + return nil, err + } + + libsystemdHandle = h + } + + f, ok := libsystemdFunctions[name] + if !ok { + var err error + f, err = libsystemdHandle.GetSymbolPointer(name) + if err != nil { + return nil, err + } + + libsystemdFunctions[name] = f + } + + return f, nil +} diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/journal.go b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go new file mode 100644 index 00000000000..9f3d9234239 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/sdjournal/journal.go @@ -0,0 +1,1120 @@ +// Copyright 2015 RedHat, Inc. +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package sdjournal provides a low-level Go interface to the +// systemd journal wrapped around the sd-journal C API. +// +// All public read methods map closely to the sd-journal API functions. See the +// sd-journal.h documentation[1] for information about each function. 
+//
+// To write to the journal, see the pure-Go "journal" package
+//
+// [1] http://www.freedesktop.org/software/systemd/man/sd-journal.html
+package sdjournal
+
+// #include <systemd/sd-journal.h>
+// #include <systemd/sd-id128.h>
+// #include <stdlib.h>
+// #include <syslog.h>
+//
+// int
+// my_sd_journal_open(void *f, sd_journal **ret, int flags)
+// {
+//   int (*sd_journal_open)(sd_journal **, int);
+//
+//   sd_journal_open = f;
+//   return sd_journal_open(ret, flags);
+// }
+//
+// int
+// my_sd_journal_open_directory(void *f, sd_journal **ret, const char *path, int flags)
+// {
+//   int (*sd_journal_open_directory)(sd_journal **, const char *, int);
+//
+//   sd_journal_open_directory = f;
+//   return sd_journal_open_directory(ret, path, flags);
+// }
+//
+// int
+// my_sd_journal_open_files(void *f, sd_journal **ret, const char **paths, int flags)
+// {
+//   int (*sd_journal_open_files)(sd_journal **, const char **, int);
+//
+//   sd_journal_open_files = f;
+//   return sd_journal_open_files(ret, paths, flags);
+// }
+//
+// void
+// my_sd_journal_close(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_close)(sd_journal *);
+//
+//   sd_journal_close = f;
+//   sd_journal_close(j);
+// }
+//
+// int
+// my_sd_journal_get_usage(void *f, sd_journal *j, uint64_t *bytes)
+// {
+//   int (*sd_journal_get_usage)(sd_journal *, uint64_t *);
+//
+//   sd_journal_get_usage = f;
+//   return sd_journal_get_usage(j, bytes);
+// }
+//
+// int
+// my_sd_journal_add_match(void *f, sd_journal *j, const void *data, size_t size)
+// {
+//   int (*sd_journal_add_match)(sd_journal *, const void *, size_t);
+//
+//   sd_journal_add_match = f;
+//   return sd_journal_add_match(j, data, size);
+// }
+//
+// int
+// my_sd_journal_add_disjunction(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_add_disjunction)(sd_journal *);
+//
+//   sd_journal_add_disjunction = f;
+//   return sd_journal_add_disjunction(j);
+// }
+//
+// int
+// my_sd_journal_add_conjunction(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_add_conjunction)(sd_journal *);
+//
+//   sd_journal_add_conjunction = f;
+//   return sd_journal_add_conjunction(j);
+// }
+//
+// void
+// my_sd_journal_flush_matches(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_flush_matches)(sd_journal *);
+//
+//   sd_journal_flush_matches = f;
+//   sd_journal_flush_matches(j);
+// }
+//
+// int
+// my_sd_journal_next(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_next)(sd_journal *);
+//
+//   sd_journal_next = f;
+//   return sd_journal_next(j);
+// }
+//
+// int
+// my_sd_journal_next_skip(void *f, sd_journal *j, uint64_t skip)
+// {
+//   int (*sd_journal_next_skip)(sd_journal *, uint64_t);
+//
+//   sd_journal_next_skip = f;
+//   return sd_journal_next_skip(j, skip);
+// }
+//
+// int
+// my_sd_journal_previous(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_previous)(sd_journal *);
+//
+//   sd_journal_previous = f;
+//   return sd_journal_previous(j);
+// }
+//
+// int
+// my_sd_journal_previous_skip(void *f, sd_journal *j, uint64_t skip)
+// {
+//   int (*sd_journal_previous_skip)(sd_journal *, uint64_t);
+//
+//   sd_journal_previous_skip = f;
+//   return sd_journal_previous_skip(j, skip);
+// }
+//
+// int
+// my_sd_journal_get_data(void *f, sd_journal *j, const char *field, const void **data, size_t *length)
+// {
+//   int (*sd_journal_get_data)(sd_journal *, const char *, const void **, size_t *);
+//
+//   sd_journal_get_data = f;
+//   return sd_journal_get_data(j, field, data, length);
+// }
+//
+// int
+// my_sd_journal_set_data_threshold(void *f, sd_journal *j, size_t sz)
+// {
+//   int (*sd_journal_set_data_threshold)(sd_journal *, size_t);
+//
+//   sd_journal_set_data_threshold = f;
+//   return sd_journal_set_data_threshold(j, sz);
+// }
+//
+// int
+// my_sd_journal_get_cursor(void *f, sd_journal *j, char **cursor)
+// {
+//   int (*sd_journal_get_cursor)(sd_journal *, char **);
+//
+//   sd_journal_get_cursor = f;
+//   return sd_journal_get_cursor(j, cursor);
+// }
+//
+// int
+// my_sd_journal_test_cursor(void *f, sd_journal *j, const char *cursor)
+// {
+//   int (*sd_journal_test_cursor)(sd_journal *, const char *);
+//
+//   sd_journal_test_cursor = f;
+//   return sd_journal_test_cursor(j, cursor);
+// }
+//
+// int
+// my_sd_journal_get_realtime_usec(void *f, sd_journal *j, uint64_t *usec)
+// {
+//   int (*sd_journal_get_realtime_usec)(sd_journal *, uint64_t *);
+//
+//   sd_journal_get_realtime_usec = f;
+//   return sd_journal_get_realtime_usec(j, usec);
+// }
+//
+// int
+// my_sd_journal_get_monotonic_usec(void *f, sd_journal *j, uint64_t *usec, sd_id128_t *boot_id)
+// {
+//   int (*sd_journal_get_monotonic_usec)(sd_journal *, uint64_t *, sd_id128_t *);
+//
+//   sd_journal_get_monotonic_usec = f;
+//   return sd_journal_get_monotonic_usec(j, usec, boot_id);
+// }
+//
+// int
+// my_sd_journal_seek_head(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_seek_head)(sd_journal *);
+//
+//   sd_journal_seek_head = f;
+//   return sd_journal_seek_head(j);
+// }
+//
+// int
+// my_sd_journal_seek_tail(void *f, sd_journal *j)
+// {
+//   int (*sd_journal_seek_tail)(sd_journal *);
+//
+//   sd_journal_seek_tail = f;
+//   return sd_journal_seek_tail(j);
+// }
+//
+//
+// int
+// my_sd_journal_seek_cursor(void *f, sd_journal *j, const char *cursor)
+// {
+//   int (*sd_journal_seek_cursor)(sd_journal *, const char *);
+//
+//   sd_journal_seek_cursor = f;
+//   return sd_journal_seek_cursor(j, cursor);
+// }
+//
+// int
+// my_sd_journal_seek_realtime_usec(void *f, sd_journal *j, uint64_t usec)
+// {
+//   int (*sd_journal_seek_realtime_usec)(sd_journal *, uint64_t);
+//
+//   sd_journal_seek_realtime_usec = f;
+//   return sd_journal_seek_realtime_usec(j, usec);
+// }
+//
+// int
+// my_sd_journal_wait(void *f, sd_journal *j, uint64_t timeout_usec)
+// {
+//   int (*sd_journal_wait)(sd_journal *, uint64_t);
+//
+//   sd_journal_wait = f;
+//   return sd_journal_wait(j, timeout_usec);
+// }
+//
+// void
+// my_sd_journal_restart_data(void *f, sd_journal *j)
+// {
+//   void (*sd_journal_restart_data)(sd_journal *);
+//
+//   sd_journal_restart_data = f;
+//   sd_journal_restart_data(j);
+// }
+//
+// int
+// my_sd_journal_enumerate_data(void *f, sd_journal *j, const void **data, size_t *length)
+// {
+//   int (*sd_journal_enumerate_data)(sd_journal *, const void **, size_t *);
+//
+//   sd_journal_enumerate_data = f;
+//   return sd_journal_enumerate_data(j, data, length);
+// }
+//
+// int
+// my_sd_journal_query_unique(void *f, sd_journal *j, const char *field)
+// {
+//   int(*sd_journal_query_unique)(sd_journal *, const char *);
+//
+//   sd_journal_query_unique = f;
+//   return sd_journal_query_unique(j, field);
+// }
+//
+// int
+// my_sd_journal_enumerate_unique(void *f, sd_journal *j, const void **data, size_t *length)
+// {
+//   int(*sd_journal_enumerate_unique)(sd_journal *, const void **, size_t *);
+//
+//   sd_journal_enumerate_unique = f;
+//   return sd_journal_enumerate_unique(j, data, length);
+// }
+//
+// void
+// my_sd_journal_restart_unique(void *f, sd_journal *j)
+// {
+//   void(*sd_journal_restart_unique)(sd_journal *);
+//
+//   sd_journal_restart_unique = f;
+//   sd_journal_restart_unique(j);
+// }
+//
+// int
+// my_sd_journal_get_catalog(void *f, sd_journal *j, char **ret)
+// {
+//   int(*sd_journal_get_catalog)(sd_journal *, char **);
+//
+//   sd_journal_get_catalog = f;
+//   return sd_journal_get_catalog(j, ret);
+// }
+//
+import "C"
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// Journal entry field strings which correspond to:
+// http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html
+const (
+	// User Journal Fields
+	SD_JOURNAL_FIELD_MESSAGE           = "MESSAGE"
+	SD_JOURNAL_FIELD_MESSAGE_ID        = "MESSAGE_ID"
+	SD_JOURNAL_FIELD_PRIORITY          = "PRIORITY"
+	SD_JOURNAL_FIELD_CODE_FILE         = "CODE_FILE"
+	SD_JOURNAL_FIELD_CODE_LINE         = "CODE_LINE"
+	SD_JOURNAL_FIELD_CODE_FUNC         = "CODE_FUNC"
+	SD_JOURNAL_FIELD_ERRNO             = "ERRNO"
+	SD_JOURNAL_FIELD_SYSLOG_FACILITY   = "SYSLOG_FACILITY"
+	SD_JOURNAL_FIELD_SYSLOG_IDENTIFIER = "SYSLOG_IDENTIFIER"
+	SD_JOURNAL_FIELD_SYSLOG_PID        = "SYSLOG_PID"
+
+	// Trusted Journal Fields
+	SD_JOURNAL_FIELD_PID                       = "_PID"
+	SD_JOURNAL_FIELD_UID                       = "_UID"
+	SD_JOURNAL_FIELD_GID                       = "_GID"
+	SD_JOURNAL_FIELD_COMM                      = "_COMM"
+	SD_JOURNAL_FIELD_EXE                       = "_EXE"
+	SD_JOURNAL_FIELD_CMDLINE                   = "_CMDLINE"
+	SD_JOURNAL_FIELD_CAP_EFFECTIVE             = "_CAP_EFFECTIVE"
+	SD_JOURNAL_FIELD_AUDIT_SESSION             = "_AUDIT_SESSION"
+	SD_JOURNAL_FIELD_AUDIT_LOGINUID            = "_AUDIT_LOGINUID"
+	SD_JOURNAL_FIELD_SYSTEMD_CGROUP            = "_SYSTEMD_CGROUP"
+	SD_JOURNAL_FIELD_SYSTEMD_SESSION           = "_SYSTEMD_SESSION"
+	SD_JOURNAL_FIELD_SYSTEMD_UNIT              = "_SYSTEMD_UNIT"
+	SD_JOURNAL_FIELD_SYSTEMD_USER_UNIT         = "_SYSTEMD_USER_UNIT"
+	SD_JOURNAL_FIELD_SYSTEMD_OWNER_UID         = "_SYSTEMD_OWNER_UID"
+	SD_JOURNAL_FIELD_SYSTEMD_SLICE             = "_SYSTEMD_SLICE"
+	SD_JOURNAL_FIELD_SELINUX_CONTEXT           = "_SELINUX_CONTEXT"
+	SD_JOURNAL_FIELD_SOURCE_REALTIME_TIMESTAMP = "_SOURCE_REALTIME_TIMESTAMP"
+	SD_JOURNAL_FIELD_BOOT_ID                   = "_BOOT_ID"
+	SD_JOURNAL_FIELD_MACHINE_ID                = "_MACHINE_ID"
+	SD_JOURNAL_FIELD_HOSTNAME                  = "_HOSTNAME"
+	SD_JOURNAL_FIELD_TRANSPORT                 = "_TRANSPORT"
+
+	// Address Fields
+	SD_JOURNAL_FIELD_CURSOR              = "__CURSOR"
+	SD_JOURNAL_FIELD_REALTIME_TIMESTAMP  = "__REALTIME_TIMESTAMP"
+	SD_JOURNAL_FIELD_MONOTONIC_TIMESTAMP = "__MONOTONIC_TIMESTAMP"
+)
+
+// Journal event constants
+const (
+	SD_JOURNAL_NOP        = int(C.SD_JOURNAL_NOP)
+	SD_JOURNAL_APPEND     = int(C.SD_JOURNAL_APPEND)
+	SD_JOURNAL_INVALIDATE = int(C.SD_JOURNAL_INVALIDATE)
+)
+
+const (
+	// IndefiniteWait is a sentinel value that can be passed to
+	// sdjournal.Wait() to signal an indefinite wait for new journal
+	// events. It is implemented as the maximum value for a time.Duration:
+	// https://github.com/golang/go/blob/e4dcf5c8c22d98ac9eac7b9b226596229624cb1d/src/time/time.go#L434
+	IndefiniteWait time.Duration = 1<<63 - 1
+)
+
+var (
+	// ErrNoTestCursor gets returned when using TestCursor function and cursor
+	// parameter is not the same as the current cursor position.
+	ErrNoTestCursor = errors.New("Cursor parameter is not the same as current position")
+)
+
+// Journal is a Go wrapper of an sd_journal structure.
+type Journal struct {
+	cjournal *C.sd_journal
+	mu       sync.Mutex
+}
+
+// JournalEntry represents all fields of a journal entry plus address fields.
+type JournalEntry struct {
+	Fields             map[string]string
+	Cursor             string
+	RealtimeTimestamp  uint64
+	MonotonicTimestamp uint64
+}
+
+// Match is a convenience wrapper to describe filters supplied to AddMatch.
+type Match struct {
+	Field string
+	Value string
+}
+
+// String returns a string representation of a Match suitable for use with AddMatch.
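+//
+// For example, to keep only entries from a single systemd unit (a sketch,
+// assuming an open Journal j; the unit name is illustrative):
+//
+//	m := Match{Field: SD_JOURNAL_FIELD_SYSTEMD_UNIT, Value: "sshd.service"}
+//	if err := j.AddMatch(m.String()); err != nil {
+//		// handle the error
+//	}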
+func (m *Match) String() string {
+	return m.Field + "=" + m.Value
+}
+
+// NewJournal returns a new Journal instance pointing to the local journal
+func NewJournal() (j *Journal, err error) {
+	j = &Journal{}
+
+	sd_journal_open, err := getFunction("sd_journal_open")
+	if err != nil {
+		return nil, err
+	}
+
+	r := C.my_sd_journal_open(sd_journal_open, &j.cjournal, C.SD_JOURNAL_LOCAL_ONLY)
+
+	if r < 0 {
+		return nil, fmt.Errorf("failed to open journal: %d", syscall.Errno(-r))
+	}
+
+	return j, nil
+}
+
+// NewJournalFromDir returns a new Journal instance pointing to a journal residing
+// in a given directory.
+func NewJournalFromDir(path string) (j *Journal, err error) {
+	j = &Journal{}
+
+	sd_journal_open_directory, err := getFunction("sd_journal_open_directory")
+	if err != nil {
+		return nil, err
+	}
+
+	p := C.CString(path)
+	defer C.free(unsafe.Pointer(p))
+
+	r := C.my_sd_journal_open_directory(sd_journal_open_directory, &j.cjournal, p, 0)
+	if r < 0 {
+		return nil, fmt.Errorf("failed to open journal in directory %q: %d", path, syscall.Errno(-r))
+	}
+
+	return j, nil
+}
+
+// NewJournalFromFiles returns a new Journal instance pointing to the journals
+// residing in the given files.
+func NewJournalFromFiles(paths ...string) (j *Journal, err error) {
+	j = &Journal{}
+
+	sd_journal_open_files, err := getFunction("sd_journal_open_files")
+	if err != nil {
+		return nil, err
+	}
+
+	// by making the slice 1 elem too long, we guarantee it'll be null-terminated
+	cPaths := make([]*C.char, len(paths)+1)
+	for idx, path := range paths {
+		p := C.CString(path)
+		cPaths[idx] = p
+		defer C.free(unsafe.Pointer(p))
+	}
+
+	r := C.my_sd_journal_open_files(sd_journal_open_files, &j.cjournal, &cPaths[0], 0)
+	if r < 0 {
+		return nil, fmt.Errorf("failed to open journals in paths %q: %d", paths, syscall.Errno(-r))
+	}
+
+	return j, nil
+}
+
+// Close closes a journal opened with NewJournal.
+func (j *Journal) Close() error {
+	sd_journal_close, err := getFunction("sd_journal_close")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	C.my_sd_journal_close(sd_journal_close, j.cjournal)
+	j.mu.Unlock()
+
+	return nil
+}
+
+// AddMatch adds a match by which to filter the entries of the journal.
+func (j *Journal) AddMatch(match string) error {
+	sd_journal_add_match, err := getFunction("sd_journal_add_match")
+	if err != nil {
+		return err
+	}
+
+	m := C.CString(match)
+	defer C.free(unsafe.Pointer(m))
+
+	j.mu.Lock()
+	r := C.my_sd_journal_add_match(sd_journal_add_match, j.cjournal, unsafe.Pointer(m), C.size_t(len(match)))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to add match: %d", syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// AddDisjunction inserts a logical OR in the match list.
+func (j *Journal) AddDisjunction() error {
+	sd_journal_add_disjunction, err := getFunction("sd_journal_add_disjunction")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_add_disjunction(sd_journal_add_disjunction, j.cjournal)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to add a disjunction in the match list: %d", syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// AddConjunction inserts a logical AND in the match list.
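+//
+// As a related sketch, entries from either of two units can be matched by
+// inserting an OR between the matches (assumes an open Journal j; unit names
+// are illustrative):
+//
+//	j.AddMatch("_SYSTEMD_UNIT=sshd.service")
+//	j.AddDisjunction()
+//	j.AddMatch("_SYSTEMD_UNIT=cron.service")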
+func (j *Journal) AddConjunction() error {
+	sd_journal_add_conjunction, err := getFunction("sd_journal_add_conjunction")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_add_conjunction(sd_journal_add_conjunction, j.cjournal)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to add a conjunction in the match list: %d", syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// FlushMatches flushes all matches, disjunctions and conjunctions.
+func (j *Journal) FlushMatches() {
+	sd_journal_flush_matches, err := getFunction("sd_journal_flush_matches")
+	if err != nil {
+		return
+	}
+
+	j.mu.Lock()
+	C.my_sd_journal_flush_matches(sd_journal_flush_matches, j.cjournal)
+	j.mu.Unlock()
+}
+
+// Next advances the read pointer into the journal by one entry.
+func (j *Journal) Next() (uint64, error) {
+	sd_journal_next, err := getFunction("sd_journal_next")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_next(sd_journal_next, j.cjournal)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+	}
+
+	return uint64(r), nil
+}
+
+// NextSkip advances the read pointer by multiple entries at once,
+// as specified by the skip parameter.
+func (j *Journal) NextSkip(skip uint64) (uint64, error) {
+	sd_journal_next_skip, err := getFunction("sd_journal_next_skip")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_next_skip(sd_journal_next_skip, j.cjournal, C.uint64_t(skip))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+	}
+
+	return uint64(r), nil
+}
+
+// Previous sets the read pointer into the journal back by one entry.
+func (j *Journal) Previous() (uint64, error) {
+	sd_journal_previous, err := getFunction("sd_journal_previous")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_previous(sd_journal_previous, j.cjournal)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+	}
+
+	return uint64(r), nil
+}
+
+// PreviousSkip sets back the read pointer by multiple entries at once,
+// as specified by the skip parameter.
+func (j *Journal) PreviousSkip(skip uint64) (uint64, error) {
+	sd_journal_previous_skip, err := getFunction("sd_journal_previous_skip")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_previous_skip(sd_journal_previous_skip, j.cjournal, C.uint64_t(skip))
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to iterate journal: %d", syscall.Errno(-r))
+	}
+
+	return uint64(r), nil
+}
+
+func (j *Journal) getData(field string) (unsafe.Pointer, C.int, error) {
+	sd_journal_get_data, err := getFunction("sd_journal_get_data")
+	if err != nil {
+		return nil, 0, err
+	}
+
+	f := C.CString(field)
+	defer C.free(unsafe.Pointer(f))
+
+	var d unsafe.Pointer
+	var l C.size_t
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_data(sd_journal_get_data, j.cjournal, f, &d, &l)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return nil, 0, fmt.Errorf("failed to read message: %d", syscall.Errno(-r))
+	}
+
+	return d, C.int(l), nil
+}
+
+// GetData gets the data object associated with a specific field from the
+// journal entry referenced by the last completed Next/Previous function
+// call. To call GetData, you must have first called one of these functions.
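+//
+// A minimal sketch (assumes an open Journal j positioned via Next):
+//
+//	if n, err := j.Next(); err == nil && n > 0 {
+//		data, _ := j.GetData("MESSAGE")
+//		// data has the form "MESSAGE=<value>"
+//		_ = data
+//	}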
+func (j *Journal) GetData(field string) (string, error) { + d, l, err := j.getData(field) + if err != nil { + return "", err + } + + return C.GoStringN((*C.char)(d), l), nil +} + +// GetDataValue gets the data object associated with a specific field from the +// journal entry referenced by the last completed Next/Previous function call, +// returning only the value of the object. To call GetDataValue, you must first +// have called one of the Next/Previous functions. +func (j *Journal) GetDataValue(field string) (string, error) { + val, err := j.GetData(field) + if err != nil { + return "", err + } + + return strings.SplitN(val, "=", 2)[1], nil +} + +// GetDataBytes gets the data object associated with a specific field from the +// journal entry referenced by the last completed Next/Previous function call. +// To call GetDataBytes, you must first have called one of these functions. +func (j *Journal) GetDataBytes(field string) ([]byte, error) { + d, l, err := j.getData(field) + if err != nil { + return nil, err + } + + return C.GoBytes(d, l), nil +} + +// GetDataValueBytes gets the data object associated with a specific field from the +// journal entry referenced by the last completed Next/Previous function call, +// returning only the value of the object. To call GetDataValueBytes, you must first +// have called one of the Next/Previous functions. +func (j *Journal) GetDataValueBytes(field string) ([]byte, error) { + val, err := j.GetDataBytes(field) + if err != nil { + return nil, err + } + + return bytes.SplitN(val, []byte("="), 2)[1], nil +} + +// GetEntry returns a full representation of the journal entry referenced by the +// last completed Next/Previous function call, with all key-value pairs of data +// as well as address fields (cursor, realtime timestamp and monotonic timestamp). +// To call GetEntry, you must first have called one of the Next/Previous functions. 
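+//
+// A typical read loop, sketched with error handling elided (assumes an open
+// Journal j):
+//
+//	for {
+//		n, err := j.Next()
+//		if err != nil || n == 0 {
+//			break // error, or no more entries
+//		}
+//		entry, err := j.GetEntry()
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(entry.Fields[SD_JOURNAL_FIELD_MESSAGE])
+//	}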
+func (j *Journal) GetEntry() (*JournalEntry, error) { + sd_journal_get_realtime_usec, err := getFunction("sd_journal_get_realtime_usec") + if err != nil { + return nil, err + } + + sd_journal_get_monotonic_usec, err := getFunction("sd_journal_get_monotonic_usec") + if err != nil { + return nil, err + } + + sd_journal_get_cursor, err := getFunction("sd_journal_get_cursor") + if err != nil { + return nil, err + } + + sd_journal_restart_data, err := getFunction("sd_journal_restart_data") + if err != nil { + return nil, err + } + + sd_journal_enumerate_data, err := getFunction("sd_journal_enumerate_data") + if err != nil { + return nil, err + } + + j.mu.Lock() + defer j.mu.Unlock() + + var r C.int + entry := &JournalEntry{Fields: make(map[string]string)} + + var realtimeUsec C.uint64_t + r = C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &realtimeUsec) + if r < 0 { + return nil, fmt.Errorf("failed to get realtime timestamp: %d", syscall.Errno(-r)) + } + + entry.RealtimeTimestamp = uint64(realtimeUsec) + + var monotonicUsec C.uint64_t + var boot_id C.sd_id128_t + + r = C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &monotonicUsec, &boot_id) + if r < 0 { + return nil, fmt.Errorf("failed to get monotonic timestamp: %d", syscall.Errno(-r)) + } + + entry.MonotonicTimestamp = uint64(monotonicUsec) + + var c *C.char + // since the pointer is mutated by sd_journal_get_cursor, need to wait + // until after the call to free the memory + r = C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &c) + defer C.free(unsafe.Pointer(c)) + if r < 0 { + return nil, fmt.Errorf("failed to get cursor: %d", syscall.Errno(-r)) + } + + entry.Cursor = C.GoString(c) + + // Implements the JOURNAL_FOREACH_DATA_RETVAL macro from journal-internal.h + var d unsafe.Pointer + var l C.size_t + C.my_sd_journal_restart_data(sd_journal_restart_data, j.cjournal) + for { + r = C.my_sd_journal_enumerate_data(sd_journal_enumerate_data, j.cjournal, &d, &l) + if r == 0 { + break + } + + if r < 0 { + return nil, fmt.Errorf("failed to read message field: %d", syscall.Errno(-r)) + } + + msg := C.GoStringN((*C.char)(d), C.int(l)) + kv := strings.SplitN(msg, "=", 2) + if len(kv) < 2 { + return nil, fmt.Errorf("failed to parse field") + } + + entry.Fields[kv[0]] = kv[1] + } + + return entry, nil +} + +// SetDataThreshold sets the data field size threshold for data returned by +// GetData. To retrieve the complete data fields this threshold should be +// turned off by setting it to 0, so that the library always returns the +// complete data objects. +func (j *Journal) SetDataThreshold(threshold uint64) error { + sd_journal_set_data_threshold, err := getFunction("sd_journal_set_data_threshold") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_set_data_threshold(sd_journal_set_data_threshold, j.cjournal, C.size_t(threshold)) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to set data threshold: %d", syscall.Errno(-r)) + } + + return nil +} + +// GetRealtimeUsec gets the realtime (wallclock) timestamp of the journal +// entry referenced by the last completed Next/Previous function call. To +// call GetRealtimeUsec, you must first have called one of the Next/Previous +// functions. 
+func (j *Journal) GetRealtimeUsec() (uint64, error) {
+	var usec C.uint64_t
+
+	sd_journal_get_realtime_usec, err := getFunction("sd_journal_get_realtime_usec")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_realtime_usec(sd_journal_get_realtime_usec, j.cjournal, &usec)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to get realtime timestamp: %d", syscall.Errno(-r))
+	}
+
+	return uint64(usec), nil
+}
+
+// GetMonotonicUsec gets the monotonic timestamp of the journal entry
+// referenced by the last completed Next/Previous function call. To call
+// GetMonotonicUsec, you must first have called one of the Next/Previous
+// functions.
+func (j *Journal) GetMonotonicUsec() (uint64, error) {
+	var usec C.uint64_t
+	var boot_id C.sd_id128_t
+
+	sd_journal_get_monotonic_usec, err := getFunction("sd_journal_get_monotonic_usec")
+	if err != nil {
+		return 0, err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_monotonic_usec(sd_journal_get_monotonic_usec, j.cjournal, &usec, &boot_id)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return 0, fmt.Errorf("failed to get monotonic timestamp: %d", syscall.Errno(-r))
+	}
+
+	return uint64(usec), nil
+}
+
+// GetCursor gets the cursor of the last journal entry referenced by the
+// last completed Next/Previous function call. To call GetCursor, you must
+// first have called one of the Next/Previous functions.
+func (j *Journal) GetCursor() (string, error) {
+	sd_journal_get_cursor, err := getFunction("sd_journal_get_cursor")
+	if err != nil {
+		return "", err
+	}
+
+	var d *C.char
+	// since the pointer is mutated by sd_journal_get_cursor, need to wait
+	// until after the call to free the memory
+
+	j.mu.Lock()
+	r := C.my_sd_journal_get_cursor(sd_journal_get_cursor, j.cjournal, &d)
+	j.mu.Unlock()
+	defer C.free(unsafe.Pointer(d))
+
+	if r < 0 {
+		return "", fmt.Errorf("failed to get cursor: %d", syscall.Errno(-r))
+	}
+
+	cursor := C.GoString(d)
+
+	return cursor, nil
+}
+
+// TestCursor checks whether the current position in the journal matches the
+// specified cursor
+func (j *Journal) TestCursor(cursor string) error {
+	sd_journal_test_cursor, err := getFunction("sd_journal_test_cursor")
+	if err != nil {
+		return err
+	}
+
+	c := C.CString(cursor)
+	defer C.free(unsafe.Pointer(c))
+
+	j.mu.Lock()
+	r := C.my_sd_journal_test_cursor(sd_journal_test_cursor, j.cjournal, c)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to test to cursor %q: %d", cursor, syscall.Errno(-r))
+	} else if r == 0 {
+		return ErrNoTestCursor
+	}
+
+	return nil
+}
+
+// SeekHead seeks to the beginning of the journal, i.e. the oldest available
+// entry. This call must be followed by a call to Next before any call to
+// Get* will return data about the first element.
+func (j *Journal) SeekHead() error {
+	sd_journal_seek_head, err := getFunction("sd_journal_seek_head")
+	if err != nil {
+		return err
+	}
+
+	j.mu.Lock()
+	r := C.my_sd_journal_seek_head(sd_journal_seek_head, j.cjournal)
+	j.mu.Unlock()
+
+	if r < 0 {
+		return fmt.Errorf("failed to seek to head of journal: %d", syscall.Errno(-r))
+	}
+
+	return nil
+}
+
+// SeekTail may be used to seek to the end of the journal, i.e. the most recent
+// available entry. This call must be followed by a call to Next before any
+// call to Get* will return data about the last element.
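+//
+// A sketch of reading the most recent entry (assumes an open Journal j; note
+// that stepping back with Previous, as NewJournalReader does for NumFromTail,
+// also positions the read pointer on the last entry):
+//
+//	if err := j.SeekTail(); err == nil {
+//		if n, _ := j.Previous(); n > 0 {
+//			entry, _ := j.GetEntry()
+//			_ = entry
+//		}
+//	}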
+func (j *Journal) SeekTail() error { + sd_journal_seek_tail, err := getFunction("sd_journal_seek_tail") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_seek_tail(sd_journal_seek_tail, j.cjournal) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to tail of journal: %d", syscall.Errno(-r)) + } + + return nil +} + +// SeekRealtimeUsec seeks to the entry with the specified realtime (wallclock) +// timestamp, i.e. CLOCK_REALTIME. This call must be followed by a call to +// Next/Previous before any call to Get* will return data about the sought entry. +func (j *Journal) SeekRealtimeUsec(usec uint64) error { + sd_journal_seek_realtime_usec, err := getFunction("sd_journal_seek_realtime_usec") + if err != nil { + return err + } + + j.mu.Lock() + r := C.my_sd_journal_seek_realtime_usec(sd_journal_seek_realtime_usec, j.cjournal, C.uint64_t(usec)) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to %d: %d", usec, syscall.Errno(-r)) + } + + return nil +} + +// SeekCursor seeks to a concrete journal cursor. This call must be +// followed by a call to Next/Previous before any call to Get* will return +// data about the sought entry. +func (j *Journal) SeekCursor(cursor string) error { + sd_journal_seek_cursor, err := getFunction("sd_journal_seek_cursor") + if err != nil { + return err + } + + c := C.CString(cursor) + defer C.free(unsafe.Pointer(c)) + + j.mu.Lock() + r := C.my_sd_journal_seek_cursor(sd_journal_seek_cursor, j.cjournal, c) + j.mu.Unlock() + + if r < 0 { + return fmt.Errorf("failed to seek to cursor %q: %d", cursor, syscall.Errno(-r)) + } + + return nil +} + +// Wait will synchronously wait until the journal gets changed. The maximum time +// this call sleeps may be controlled with the timeout parameter. If +// sdjournal.IndefiniteWait is passed as the timeout parameter, Wait will +// wait indefinitely for a journal change. +func (j *Journal) Wait(timeout time.Duration) int { + var to uint64 + + sd_journal_wait, err := getFunction("sd_journal_wait") + if err != nil { + return -1 + } + + if timeout == IndefiniteWait { + // sd_journal_wait(3) calls for a (uint64_t) -1 to be passed to signify + // indefinite wait, but using a -1 overflows our C.uint64_t, so we use an + // equivalent hex value. + to = 0xffffffffffffffff + } else { + to = uint64(timeout / time.Microsecond) + } + j.mu.Lock() + r := C.my_sd_journal_wait(sd_journal_wait, j.cjournal, C.uint64_t(to)) + j.mu.Unlock() + + return int(r) +} + +// GetUsage returns the journal disk space usage, in bytes. +func (j *Journal) GetUsage() (uint64, error) { + var out C.uint64_t + + sd_journal_get_usage, err := getFunction("sd_journal_get_usage") + if err != nil { + return 0, err + } + + j.mu.Lock() + r := C.my_sd_journal_get_usage(sd_journal_get_usage, j.cjournal, &out) + j.mu.Unlock() + + if r < 0 { + return 0, fmt.Errorf("failed to get journal disk space usage: %d", syscall.Errno(-r)) + } + + return uint64(out), nil +} + +// GetUniqueValues returns all unique values for a given field. 
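+//
+// For example (sketch, assuming an open Journal j):
+//
+//	units, err := j.GetUniqueValues(SD_JOURNAL_FIELD_SYSTEMD_UNIT)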
+func (j *Journal) GetUniqueValues(field string) ([]string, error) { + var result []string + + sd_journal_query_unique, err := getFunction("sd_journal_query_unique") + if err != nil { + return nil, err + } + + sd_journal_enumerate_unique, err := getFunction("sd_journal_enumerate_unique") + if err != nil { + return nil, err + } + + sd_journal_restart_unique, err := getFunction("sd_journal_restart_unique") + if err != nil { + return nil, err + } + + j.mu.Lock() + defer j.mu.Unlock() + + f := C.CString(field) + defer C.free(unsafe.Pointer(f)) + + r := C.my_sd_journal_query_unique(sd_journal_query_unique, j.cjournal, f) + + if r < 0 { + return nil, fmt.Errorf("failed to query journal: %d", syscall.Errno(-r)) + } + + // Implements the SD_JOURNAL_FOREACH_UNIQUE macro from sd-journal.h + var d unsafe.Pointer + var l C.size_t + C.my_sd_journal_restart_unique(sd_journal_restart_unique, j.cjournal) + for { + r = C.my_sd_journal_enumerate_unique(sd_journal_enumerate_unique, j.cjournal, &d, &l) + if r == 0 { + break + } + + if r < 0 { + return nil, fmt.Errorf("failed to read message field: %d", syscall.Errno(-r)) + } + + msg := C.GoStringN((*C.char)(d), C.int(l)) + kv := strings.SplitN(msg, "=", 2) + if len(kv) < 2 { + return nil, fmt.Errorf("failed to parse field") + } + + result = append(result, kv[1]) + } + + return result, nil +} + +// GetCatalog retrieves a message catalog entry for the journal entry referenced +// by the last completed Next/Previous function call. To call GetCatalog, you +// must first have called one of these functions. +func (j *Journal) GetCatalog() (string, error) { + sd_journal_get_catalog, err := getFunction("sd_journal_get_catalog") + if err != nil { + return "", err + } + + var c *C.char + + j.mu.Lock() + r := C.my_sd_journal_get_catalog(sd_journal_get_catalog, j.cjournal, &c) + j.mu.Unlock() + defer C.free(unsafe.Pointer(c)) + + if r < 0 { + return "", fmt.Errorf("failed to retrieve catalog entry for current journal entry: %d", syscall.Errno(-r)) + } + + catalog := C.GoString(c) + + return catalog, nil +} diff --git a/vendor/github.com/coreos/go-systemd/sdjournal/read.go b/vendor/github.com/coreos/go-systemd/sdjournal/read.go new file mode 100644 index 00000000000..51a060fb530 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/sdjournal/read.go @@ -0,0 +1,272 @@ +// Copyright 2015 RedHat, Inc. +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdjournal + +import ( + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + "time" +) + +var ( + // ErrExpired gets returned when the Follow function runs into the + // specified timeout. + ErrExpired = errors.New("Timeout expired") +) + +// JournalReaderConfig represents options to drive the behavior of a JournalReader. +type JournalReaderConfig struct { + // The Since, NumFromTail and Cursor options are mutually exclusive and + // determine where the reading begins within the journal. The order in which + // options are written is exactly the order of precedence. 
+ Since time.Duration // start relative to a Duration from now + NumFromTail uint64 // start relative to the tail + Cursor string // start relative to the cursor + + // Show only journal entries whose fields match the supplied values. If + // the array is empty, entries will not be filtered. + Matches []Match + + // If not empty, the journal instance will point to a journal residing + // in this directory. The supplied path may be relative or absolute. + Path string + + // If not nil, Formatter will be used to translate the resulting entries + // into strings. If not set, the default format (timestamp and message field) + // will be used. If Formatter returns an error, Read will stop and return the error. + Formatter func(entry *JournalEntry) (string, error) +} + +// JournalReader is an io.ReadCloser which provides a simple interface for iterating through the +// systemd journal. A JournalReader is not safe for concurrent use by multiple goroutines. +type JournalReader struct { + journal *Journal + msgReader *strings.Reader + formatter func(entry *JournalEntry) (string, error) +} + +// NewJournalReader creates a new JournalReader with configuration options that are similar to the +// systemd journalctl tool's iteration and filtering features. +func NewJournalReader(config JournalReaderConfig) (*JournalReader, error) { + // use simpleMessageFormatter as default formatter. + if config.Formatter == nil { + config.Formatter = simpleMessageFormatter + } + + r := &JournalReader{ + formatter: config.Formatter, + } + + // Open the journal + var err error + if config.Path != "" { + r.journal, err = NewJournalFromDir(config.Path) + } else { + r.journal, err = NewJournal() + } + if err != nil { + return nil, err + } + + // Add any supplied matches + for _, m := range config.Matches { + if err = r.journal.AddMatch(m.String()); err != nil { + return nil, err + } + } + + // Set the start position based on options + if config.Since != 0 { + // Start based on a relative time + start := time.Now().Add(config.Since) + if err := r.journal.SeekRealtimeUsec(uint64(start.UnixNano() / 1000)); err != nil { + return nil, err + } + } else if config.NumFromTail != 0 { + // Start based on a number of lines before the tail + if err := r.journal.SeekTail(); err != nil { + return nil, err + } + + // Move the read pointer into position near the tail. Go one further than + // the option so that the initial cursor advancement positions us at the + // correct starting point. + skip, err := r.journal.PreviousSkip(config.NumFromTail + 1) + if err != nil { + return nil, err + } + // If we skipped fewer lines than expected, we have reached journal start. + // Thus, we seek to head so that next invocation can read the first line. + if skip != config.NumFromTail+1 { + if err := r.journal.SeekHead(); err != nil { + return nil, err + } + } + } else if config.Cursor != "" { + // Start based on a custom cursor + if err := r.journal.SeekCursor(config.Cursor); err != nil { + return nil, err + } + } + + return r, nil +} + +// Read reads entries from the journal. Read follows the Reader interface so +// it must be able to read a specific amount of bytes. Journald on the other +// hand only allows us to read full entries of arbitrary size (without byte +// granularity). JournalReader is therefore internally buffering entries that +// don't fit in the read buffer. Callers should keep calling until 0 and/or an +// error is returned. 
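+//
+// A minimal consumption sketch (assumes a *JournalReader r built with
+// NewJournalReader):
+//
+//	buf := make([]byte, 64<<10)
+//	for {
+//		n, err := r.Read(buf)
+//		if err != nil {
+//			break // io.EOF once the reader has caught up with the journal
+//		}
+//		os.Stdout.Write(buf[:n])
+//	}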
+func (r *JournalReader) Read(b []byte) (int, error) { + if r.msgReader == nil { + // Advance the journal cursor. It has to be called at least one time + // before reading + c, err := r.journal.Next() + + // An unexpected error + if err != nil { + return 0, err + } + + // EOF detection + if c == 0 { + return 0, io.EOF + } + + entry, err := r.journal.GetEntry() + if err != nil { + return 0, err + } + + // Build a message + msg, err := r.formatter(entry) + if err != nil { + return 0, err + } + r.msgReader = strings.NewReader(msg) + } + + // Copy and return the message + sz, err := r.msgReader.Read(b) + if err == io.EOF { + // The current entry has been fully read. Don't propagate this + // EOF, so the next entry can be read at the next Read() + // iteration. + r.msgReader = nil + return sz, nil + } + if err != nil { + return sz, err + } + if r.msgReader.Len() == 0 { + r.msgReader = nil + } + + return sz, nil +} + +// Close closes the JournalReader's handle to the journal. +func (r *JournalReader) Close() error { + return r.journal.Close() +} + +// Rewind attempts to rewind the JournalReader to the first entry. +func (r *JournalReader) Rewind() error { + r.msgReader = nil + return r.journal.SeekHead() +} + +// Follow synchronously follows the JournalReader, writing each new journal entry to writer. The +// follow will continue until a single time.Time is received on the until channel. +func (r *JournalReader) Follow(until <-chan time.Time, writer io.Writer) error { + + // Process journal entries and events. Entries are flushed until the tail or + // timeout is reached, and then we wait for new events or the timeout. + var msg = make([]byte, 64*1<<(10)) + var waitCh = make(chan int, 1) + var waitGroup sync.WaitGroup + defer waitGroup.Wait() + +process: + for { + c, err := r.Read(msg) + if err != nil && err != io.EOF { + return err + } + + select { + case <-until: + return ErrExpired + default: + } + if c > 0 { + if _, err = writer.Write(msg[:c]); err != nil { + return err + } + continue process + } + + // We're at the tail, so wait for new events or time out. + // Holds journal events to process. Tightly bounded for now unless there's a + // reason to unblock the journal watch routine more quickly. + for { + waitGroup.Add(1) + go func() { + status := r.journal.Wait(100 * time.Millisecond) + waitCh <- status + waitGroup.Done() + }() + + select { + case <-until: + return ErrExpired + case e := <-waitCh: + switch e { + case SD_JOURNAL_NOP: + // the journal did not change since the last invocation + case SD_JOURNAL_APPEND, SD_JOURNAL_INVALIDATE: + continue process + default: + if e < 0 { + return fmt.Errorf("received error event: %d", e) + } + + log.Printf("received unknown event: %d\n", e) + } + } + } + } +} + +// simpleMessageFormatter is the default formatter. +// It returns a string representing the current journal entry in a simple format which +// includes the entry timestamp and MESSAGE field. 
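+//
+// A custom Formatter can be supplied through JournalReaderConfig instead,
+// e.g. (sketch):
+//
+//	cfg := JournalReaderConfig{
+//		Since: -1 * time.Hour,
+//		Formatter: func(entry *JournalEntry) (string, error) {
+//			return entry.Fields[SD_JOURNAL_FIELD_MESSAGE] + "\n", nil
+//		},
+//	}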
+func simpleMessageFormatter(entry *JournalEntry) (string, error) {
+	msg, ok := entry.Fields["MESSAGE"]
+	if !ok {
+		return "", fmt.Errorf("no MESSAGE field present in journal entry")
+	}
+
+	usec := entry.RealtimeTimestamp
+	timestamp := time.Unix(0, int64(usec)*int64(time.Microsecond))
+
+	return fmt.Sprintf("%s %s\n", timestamp, msg), nil
+}
diff --git a/vendor/github.com/coreos/pkg/CONTRIBUTING.md b/vendor/github.com/coreos/pkg/CONTRIBUTING.md
new file mode 100644
index 00000000000..6662073a848
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/CONTRIBUTING.md
@@ -0,0 +1,71 @@
+# How to Contribute
+
+CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
+GitHub pull requests. This document outlines some of the conventions on
+development workflow, commit message formatting, contact points and other
+resources to make it easier to get your contribution accepted.
+
+# Certificate of Origin
+
+By contributing to this project you agree to the Developer Certificate of
+Origin (DCO). This document was created by the Linux Kernel community and is a
+simple statement that you, as a contributor, have the legal right to make the
+contribution. See the [DCO](DCO) file for details.
+
+# Email and Chat
+
+The project currently uses the general CoreOS email list and IRC channel:
+- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
+- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
+
+Please avoid emailing maintainers found in the MAINTAINERS file directly. They
+are very busy and read the mailing lists.
+
+## Getting Started
+
+- Fork the repository on GitHub
+- Read the [README](README.md) for build and test instructions
+- Play with the project, submit bugs, submit patches!
+
+## Contribution Flow
+
+This is a rough outline of what a contributor's workflow looks like:
+
+- Create a topic branch from where you want to base your work (usually master).
+- Make commits of logical units.
+- Make sure your commit messages are in the proper format (see below).
+- Push your changes to a topic branch in your fork of the repository.
+- Make sure the tests pass, and add any new tests as appropriate.
+- Submit a pull request to the original repository.
+
+Thanks for your contributions!
+
+### Format of the Commit Message
+
+We follow a rough convention for commit messages that is designed to answer two
+questions: what changed and why. The subject line should feature the what and
+the body of the commit should describe the why.
+
+```
+scripts: add the test-cluster command
+
+this uses tmux to setup a test cluster that you can easily kill and
+start for debugging.
+
+Fixes #38
+```
+
+The format can be described more formally as follows:
+
+```
+<subsystem>: <what changed>
+<BLANK LINE>
+<why this change was made>
+<BLANK LINE>