diff --git a/.ci/beats-tester.groovy b/.ci/beats-tester.groovy index eb1357700b6..91781a98d31 100644 --- a/.ci/beats-tester.groovy +++ b/.ci/beats-tester.groovy @@ -54,6 +54,7 @@ pipeline { options { skipDefaultCheckout() } when { branch 'master' } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } @@ -61,6 +62,7 @@ pipeline { options { skipDefaultCheckout() } when { branch '*.x' } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } @@ -84,6 +86,7 @@ pipeline { } } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 4145ee6bdd1..91902595a3c 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -191,10 +191,14 @@ def pushCIDockerImages(){ } } -def tagAndPush(name){ +def tagAndPush(beatName){ def libbetaVer = sh(label: 'Get libbeat version', script: 'grep defaultBeatVersion ${BASE_DIR}/libbeat/version/version.go|cut -d "=" -f 2|tr -d \\"', returnStdout: true)?.trim() + def aliasVersion = "" if("${env.SNAPSHOT}" == "true"){ + aliasVersion = libbetaVer.substring(0, libbetaVer.lastIndexOf(".")) // remove third number in version + libbetaVer += "-SNAPSHOT" + aliasVersion += "-SNAPSHOT" } def tagName = "${libbetaVer}" @@ -207,25 +211,37 @@ def tagAndPush(name){ // supported image flavours def variants = ["", "-oss", "-ubi8"] variants.each { variant -> - def oldName = "${DOCKER_REGISTRY}/beats/${name}${variant}:${libbetaVer}" - def newName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${tagName}" - def commitName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${env.GIT_BASE_COMMIT}" - - def iterations = 0 - retryWithSleep(retries: 3, seconds: 5, backoff: true) { - iterations++ - def status = sh(label:'Change tag and push', script: """ - 
docker tag ${oldName} ${newName} - docker push ${newName} - docker tag ${oldName} ${commitName} - docker push ${commitName} - """, returnStatus: true) - - if ( status > 0 && iterations < 3) { - error('tag and push failed, retry') - } else if ( status > 0 ) { - log(level: 'WARN', text: "${name} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") - } + doTagAndPush(beatName, variant, libbetaVer, tagName) + doTagAndPush(beatName, variant, libbetaVer, "${env.GIT_BASE_COMMIT}") + + if (!isPR() && aliasVersion != "") { + doTagAndPush(beatName, variant, libbetaVer, aliasVersion) + } + } +} + +/** +* @param beatName name of the Beat +* @param variant name of the variant used to build the docker image name +* @param sourceTag tag to be used as source for the docker tag command, usually under the 'beats' namespace +* @param targetTag tag to be used as target for the docker tag command, usually under the 'observability-ci' namespace +*/ +def doTagAndPush(beatName, variant, sourceTag, targetTag) { + def sourceName = "${DOCKER_REGISTRY}/beats/${beatName}${variant}:${sourceTag}" + def targetName = "${DOCKER_REGISTRY}/observability-ci/${beatName}${variant}:${targetTag}" + + def iterations = 0 + retryWithSleep(retries: 3, seconds: 5, backoff: true) { + iterations++ + def status = sh(label: "Change tag and push ${targetName}", script: """ + docker tag ${sourceName} ${targetName} + docker push ${targetName} + """, returnStatus: true) + + if ( status > 0 && iterations < 3) { + error("tag and push failed for ${beatName}, retry") + } else if ( status > 0 ) { + log(level: 'WARN', text: "${beatName} doesn't have ${variant} docker images. 
See https://github.com/elastic/beats/pull/21621") } } } @@ -327,7 +343,16 @@ def publishPackages(baseDir){ bucketUri = "gs://${JOB_GCS_BUCKET}/pull-requests/pr-${env.CHANGE_ID}" } def beatsFolderName = getBeatsName(baseDir) - googleStorageUpload(bucket: "${bucketUri}/${beatsFolderName}", + uploadPackages("${bucketUri}/${beatsFolderName}", baseDir) + + // Copy those files to another location with the sha commit to test them + // afterwards. + bucketUri = "gs://${JOB_GCS_BUCKET}/commits/${env.GIT_BASE_COMMIT}" + uploadPackages("${bucketUri}/${beatsFolderName}", baseDir) +} + +def uploadPackages(bucketUri, baseDir){ + googleStorageUpload(bucket: bucketUri, credentialsId: "${JOB_GCS_CREDENTIALS}", pathPrefix: "${baseDir}/build/distributions/", pattern: "${baseDir}/build/distributions/**/*", diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 51255305f42..f2750175969 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,9 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix azure storage event format. {pull}21845[21845] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] *Packetbeat* @@ -630,6 +633,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - New juniper.srx dataset for Juniper SRX logs. {pull}20017[20017] - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446] - Adding support for FIPS in s3 input {pull}21446[21446] +- Add SSL option to checkpoint module {pull}19560[19560] +- Add max_number_of_messages config into s3 input. 
{pull}21993[21993] *Heartbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 349fe58b3d1..477f0b53201 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2183,12 +2183,12 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/auto -------------------------------------------------------------------------------- -Dependency : github.com/Microsoft/go-winio -Version: v0.4.15-0.20190919025122-fc70bd9a86b5 +Dependency : github.com/bi-zone/go-winio +Version: v0.4.15 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!microsoft/go-winio@v0.4.15-0.20190919025122-fc70bd9a86b5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/bi-zone/go-winio@v0.4.15/LICENSE: The MIT License (MIT) diff --git a/deploy/kubernetes/metricbeat-kubernetes.yaml b/deploy/kubernetes/metricbeat-kubernetes.yaml index 32d1010f4d0..db1eb25d7a5 100644 --- a/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -189,8 +189,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -204,9 +202,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 diff --git a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index 0197fe136b6..34bcf536068 100644 --- a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ -64,8 +64,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -79,9 +77,6 @@ spec: - name: cgroup hostPath: 
path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 diff --git a/filebeat/docs/modules/checkpoint.asciidoc b/filebeat/docs/modules/checkpoint.asciidoc index c4e453b452d..841e66fdbab 100644 --- a/filebeat/docs/modules/checkpoint.asciidoc +++ b/filebeat/docs/modules/checkpoint.asciidoc @@ -70,6 +70,18 @@ A list of tags to include in events. Including `forwarded` indicates that the events did not originate on this host and causes `host.name` to not be added to events. Defaults to `[checkpoint-firewall, forwarded]`. +*`var.ssl`*:: + +The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS. +```yaml +ssl: + enabled: true + certificate_authorities: ["my-ca.pem"] + certificate: "filebeat-cert.pem" + key: "filebeat-key.pem" + client_authentication: "required" +``` + [float] ==== Check Point devices diff --git a/go.mod b/go.mod index 720690f1f2f..2ef65606319 100644 --- a/go.mod +++ b/go.mod @@ -189,6 +189,7 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible + github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec github.com/cucumber/godog => github.com/cucumber/godog v0.8.1 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 diff --git a/go.sum b/go.sum index 5c01c612fe3..97f31d79292 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,6 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 
h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -132,6 +130,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bi-zone/go-winio v0.4.15 h1:viLHm+U7bzIkfVHuWgc3Wp/sT5zaLoRG7XdOEy1b12w= +github.com/bi-zone/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa h1:aXHPZwx8Y5z8r+1WPylnu095usTf6QSshaHs6nVMBc0= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index bbb2bc979bc..898f3cd254c 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -36,8 +36,6 @@ import ( // Test docker start emits an autodiscover event func TestDockerStart(t *testing.T) { - t.Skip("#20360 Flaky TestDockerStart skipped") - log := logp.NewLogger("docker") d, err := dk.NewClient() @@ -70,15 +68,17 @@ func TestDockerStart(t *testing.T) { // Start cmd := []string{"echo", "Hi!"} labels := 
map[string]string{"label": "foo", "label.child": "bar"} - ID, err := d.ContainerStart("busybox", cmd, labels) + ID, err := d.ContainerStart("busybox:latest", cmd, labels) if err != nil { t.Fatal(err) } - checkEvent(t, listener, true) + defer d.ContainerRemove(ID) + + checkEvent(t, listener, ID, true) // Kill d.ContainerKill(ID) - checkEvent(t, listener, false) + checkEvent(t, listener, ID, false) } func getValue(e bus.Event, key string) interface{} { @@ -89,12 +89,13 @@ func getValue(e bus.Event, key string) interface{} { return val } -func checkEvent(t *testing.T, listener bus.Listener, start bool) { +func checkEvent(t *testing.T, listener bus.Listener, id string, start bool) { + timeout := time.After(60 * time.Second) for { select { case e := <-listener.Events(): // Ignore any other container - if getValue(e, "docker.container.image") != "busybox" { + if getValue(e, "container.id") != id { continue } if start { @@ -104,7 +105,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "stop"), true) assert.Nil(t, getValue(e, "start")) } - assert.Equal(t, getValue(e, "container.image.name"), "busybox") + assert.Equal(t, getValue(e, "container.image.name"), "busybox:latest") // labels.dedot=true by default assert.Equal(t, common.MapStr{ @@ -122,8 +123,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "docker.container.name"), getValue(e, "meta.container.name")) assert.Equal(t, getValue(e, "docker.container.image"), getValue(e, "meta.container.image.name")) return - - case <-time.After(10 * time.Second): + case <-timeout: t.Fatal("Timeout waiting for provider events") return } diff --git a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go index 616525b432a..e17b4258232 100644 --- a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go +++ 
b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go @@ -30,14 +30,10 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -type KubernetesKeystores map[string]keystore.Keystore - -// KubernetesKeystoresRegistry holds KubernetesKeystores for known namespaces. Once a Keystore for one k8s namespace -// is initialized it will be reused every time it is needed. +// KubernetesKeystoresRegistry implements a Provider for Keystore. type KubernetesKeystoresRegistry struct { - kubernetesKeystores KubernetesKeystores - logger *logp.Logger - client k8s.Interface + logger *logp.Logger + client k8s.Interface } // KubernetesSecretsKeystore allows to retrieve passwords from Kubernetes secrets for a given namespace @@ -56,9 +52,8 @@ func Factoryk8s(keystoreNamespace string, ks8client k8s.Interface, logger *logp. // NewKubernetesKeystoresRegistry initializes a KubernetesKeystoresRegistry func NewKubernetesKeystoresRegistry(logger *logp.Logger, client k8s.Interface) keystore.Provider { return &KubernetesKeystoresRegistry{ - kubernetesKeystores: KubernetesKeystores{}, - logger: logger, - client: client, + logger: logger, + client: client, } } @@ -75,12 +70,7 @@ func (kr *KubernetesKeystoresRegistry) GetKeystore(event bus.Event) keystore.Key namespace = ns.(string) } if namespace != "" { - // either retrieve already stored keystore or create a new one for the namespace - if storedKeystore, ok := kr.kubernetesKeystores[namespace]; ok { - return storedKeystore - } k8sKeystore, _ := Factoryk8s(namespace, kr.client, kr.logger) - kr.kubernetesKeystores["namespace"] = k8sKeystore return k8sKeystore } kr.logger.Debugf("Cannot retrieve kubernetes namespace from event: %s", event) diff --git a/libbeat/docs/getting-started.asciidoc b/libbeat/docs/getting-started.asciidoc index b1a85fddb46..5291f755e5b 100644 --- a/libbeat/docs/getting-started.asciidoc +++ b/libbeat/docs/getting-started.asciidoc @@ -13,5 +13,5 @@ Each Beat is a separately installable product. 
To learn how to get started, see: * {winlogbeat-ref}/winlogbeat-installation-configuration.html[Winlogbeat] If you're planning to use the {metrics-app} or the {logs-app} in {kib}, -also see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. \ No newline at end of file diff --git a/libbeat/docs/howto/load-dashboards.asciidoc b/libbeat/docs/howto/load-dashboards.asciidoc index 781789d3ae4..c03b512d636 100644 --- a/libbeat/docs/howto/load-dashboards.asciidoc +++ b/libbeat/docs/howto/load-dashboards.asciidoc @@ -15,8 +15,8 @@ ifdef::has_solutions[] TIP: For deeper observability into your infrastructure, you can use the {metrics-app} and the {logs-app} in {kib}. -For more details, see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. endif::has_solutions[] {beatname_uc} comes packaged with example Kibana dashboards, visualizations, diff --git a/libbeat/docs/overview.asciidoc b/libbeat/docs/overview.asciidoc index 11dc10f2b8f..bdc46aaaf28 100644 --- a/libbeat/docs/overview.asciidoc +++ b/libbeat/docs/overview.asciidoc @@ -28,8 +28,8 @@ To get started, see <>. Want to get up and running quickly with infrastructure metrics monitoring and centralized log analytics? Try out the {metrics-app} and the {logs-app} in {kib}. -For more details, see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. [float] === Need to capture other kinds of data? 
diff --git a/libbeat/docs/shared-faq.asciidoc b/libbeat/docs/shared-faq.asciidoc index 9aa8c3442c1..d6c48b73aa9 100644 --- a/libbeat/docs/shared-faq.asciidoc +++ b/libbeat/docs/shared-faq.asciidoc @@ -54,6 +54,27 @@ connect to the Lumberjack input plugin. To learn how to install and update plugins, see {logstash-ref}/working-with-plugins.html[Working with plugins]. endif::[] +ifndef::no-output-logstash[] +[[publishing-ls-fails-connection-reset-by-peer]] +=== Publishing to {ls} fails with "connection reset by peer" message + +{beatname_uc} requires a persistent TCP connection to {ls}. If a firewall interferes +with the connection, you might see errors like this: + +[source,shell] +---------------------------------------------------------------------- +Failed to publish events caused by: write tcp ... write: connection reset by peer +---------------------------------------------------------------------- + + +To solve the problem: + +* make sure the firewall is not closing connections between {beatname_uc} and {ls}, or +* set the `ttl` value in the <> to a value that's +lower than the maximum time allowed by the firewall, and set `pipelining` to 0 +(pipelining cannot be enabled when `ttl` is used). 
+endif::[] + ifndef::no-output-logstash[] [[metadata-missing]] === @metadata is missing in {ls} diff --git a/libbeat/tests/docker/docker.go b/libbeat/tests/docker/docker.go index 888347c5cc7..8bb5efadbfa 100644 --- a/libbeat/tests/docker/docker.go +++ b/libbeat/tests/docker/docker.go @@ -19,6 +19,8 @@ package docker import ( "context" + "io" + "io/ioutil" "github.com/pkg/errors" @@ -42,13 +44,12 @@ func NewClient() (Client, error) { // ContainerStart pulls and starts the given container func (c Client) ContainerStart(image string, cmd []string, labels map[string]string) (string, error) { - ctx := context.Background() - respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + err := c.imagePull(image) if err != nil { - return "", errors.Wrapf(err, "pullling image %s", image) + return "", err } - defer respBody.Close() + ctx := context.Background() resp, err := c.cli.ContainerCreate(ctx, &container.Config{ Image: image, Cmd: cmd, @@ -65,6 +66,36 @@ func (c Client) ContainerStart(image string, cmd []string, labels map[string]str return resp.ID, nil } +// imagePull pulls an image +func (c Client) imagePull(image string) (err error) { + ctx := context.Background() + _, _, err = c.cli.ImageInspectWithRaw(ctx, image) + if err == nil { + // Image already available, do nothing + return nil + } + for retry := 0; retry < 3; retry++ { + err = func() error { + respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + if err != nil { + return errors.Wrapf(err, "pullling image %s", image) + } + defer respBody.Close() + + // Read all the response, to be sure that the pull has finished before returning. 
+ _, err = io.Copy(ioutil.Discard, respBody) + if err != nil { + return errors.Wrapf(err, "reading response for image %s", image) + } + return nil + }() + if err == nil { + break + } + } + return +} + // ContainerWait waits for a container to finish func (c Client) ContainerWait(ID string) error { ctx := context.Background() @@ -89,7 +120,7 @@ func (c Client) ContainerKill(ID string) error { return c.cli.ContainerKill(ctx, ID, "KILL") } -// ContainerRemove kills and removed the given container +// ContainerRemove kills and removes the given container func (c Client) ContainerRemove(ID string) error { ctx := context.Background() return c.cli.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{ diff --git a/metricbeat/docs/modules/istio.asciidoc b/metricbeat/docs/modules/istio.asciidoc index c80e2d84c09..fee401e1983 100644 --- a/metricbeat/docs/modules/istio.asciidoc +++ b/metricbeat/docs/modules/istio.asciidoc @@ -10,8 +10,8 @@ beta[] This is the Istio module. This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. -where the Istio module collects metrics from the +versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio +module collects metrics from the Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. For versions after `1.5`, `istiod` and `proxy` metricsets can be used. 
diff --git a/metricbeat/module/system/diskio/diskio.go b/metricbeat/module/system/diskio/diskio.go index 1359180cff6..4a7e2e2b5fe 100644 --- a/metricbeat/module/system/diskio/diskio.go +++ b/metricbeat/module/system/diskio/diskio.go @@ -21,6 +21,7 @@ package diskio import ( "fmt" + "runtime" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/metric/system/diskio" @@ -114,7 +115,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { diskWriteBytes += counters.WriteBytes //Add linux-only data if agent is off as not to make breaking changes. - if !m.IsAgent { + if !m.IsAgent && runtime.GOOS == "linux" { result, err := m.statistics.CalcIOStatistics(counters) if err != nil { return errors.Wrap(err, "error calculating iostat") diff --git a/metricbeat/module/system/memory/memory.go b/metricbeat/module/system/memory/memory.go index 27e76b85489..26c6bea1867 100644 --- a/metricbeat/module/system/memory/memory.go +++ b/metricbeat/module/system/memory/memory.go @@ -42,7 +42,7 @@ func init() { // MetricSet for fetching system memory metrics. type MetricSet struct { mb.BaseMetricSet - IsFleet bool + IsAgent bool } // New is a mb.MetricSetFactory that returns a memory.MetricSet. @@ -53,7 +53,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("unexpected module type") } - return &MetricSet{BaseMetricSet: base, IsFleet: systemModule.IsAgent}, nil + return &MetricSet{BaseMetricSet: base, IsAgent: systemModule.IsAgent}, nil } // Fetch fetches memory metrics from the OS. 
@@ -117,7 +117,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { } // for backwards compatibility, only report if we're not in fleet mode - if !m.IsFleet { + if !m.IsAgent { err := linux.FetchLinuxMemStats(memory) if err != nil { return errors.Wrap(err, "error getting page stats") diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index 804c62d06d6..c99ffaa1123 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -156,10 +156,12 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { // There's some more Windows memory quirks we need to deal with. // "rss" is a linux concept, but "wss" is a direct match on Windows. // "share" is also unavailable on Windows. + if runtime.GOOS == "windows" { + proc.Delete("memory.share") + } if m.IsAgent { if runtime.GOOS == "windows" { - proc.Delete("memory.share") if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { proc.Put("memory.wss", setSize) } diff --git a/x-pack/auditbeat/docs/modules/system.asciidoc b/x-pack/auditbeat/docs/modules/system.asciidoc index 15eafc34116..e850c065197 100644 --- a/x-pack/auditbeat/docs/modules/system.asciidoc +++ b/x-pack/auditbeat/docs/modules/system.asciidoc @@ -97,7 +97,7 @@ This module also supports the <> described later. -*`state.period`*:: The frequency at which the datasets send full state information. +*`state.period`*:: The interval at which the datasets send full state information. This option can be overridden per dataset using `{dataset}.state.period`. *`user.detect_password_changes`*:: If the `user` dataset is configured and diff --git a/x-pack/auditbeat/module/system/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/_meta/docs.asciidoc index 083435d94ae..a2a36987c51 100644 --- a/x-pack/auditbeat/module/system/_meta/docs.asciidoc +++ b/x-pack/auditbeat/module/system/_meta/docs.asciidoc @@ -90,7 +90,7 @@ This module also supports the <> described later. 
-*`state.period`*:: The frequency at which the datasets send full state information. +*`state.period`*:: The interval at which the datasets send full state information. This option can be overridden per dataset using `{dataset}.state.period`. *`user.detect_password_changes`*:: If the `user` dataset is configured and diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index fa6fc66f4cd..6e5df7e0c6e 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -10,9 +10,12 @@ import ( "errors" "fmt" "runtime" + "strings" "time" "unsafe" + "debug/elf" + "github.com/coreos/pkg/dlopen" ) @@ -204,29 +207,57 @@ func (lib *librpm) close() error { return nil } -func openLibrpm() (*librpm, error) { - var librpmNames = []string{ - "librpm.so", // with rpm-devel installed - "librpm.so.9", // Fedora 31/32 - "librpm.so.8", // Fedora 29/30 - "librpm.so.3", // CentOS 7 - "librpm.so.1", // CentOS 6 - - // Following for completeness, but not explicitly tested - "librpm.so.10", - "librpm.so.7", - "librpm.so.6", - "librpm.so.5", - "librpm.so.4", - "librpm.so.2", +// getLibrpmNames determines the versions of librpm.so that are +// installed on a system. rpm-devel rpm installs the librpm.so +// symbolic link to the correct version of librpm, but that isn't a +// required package. rpm will install librpm.so.X, where X is the +// version number. getLibrpmNames looks at the elf header for the rpm +// binary to determine what version of librpm.so it is linked against. 
+func getLibrpmNames() []string { + var rpmPaths = []string{ + "/usr/bin/rpm", + "/bin/rpm", + } + var libNames = []string{ + "librpm.so", } + var rpmElf *elf.File + var err error + + for _, path := range rpmPaths { + rpmElf, err = elf.Open(path) + if err == nil { + break + } + } + if err != nil { + return libNames + } + + impLibs, err := rpmElf.ImportedLibraries() + if err != nil { + return libNames + } + + for _, lib := range impLibs { + if strings.Contains(lib, "librpm.so") { + libNames = append(libNames, lib) + } + } + + return libNames +} + +func openLibrpm() (*librpm, error) { var librpm librpm var err error + librpmNames := getLibrpmNames() + librpm.handle, err = dlopen.GetHandle(librpmNames) if err != nil { - return nil, err + return nil, fmt.Errorf("Couldn't open %v", librpmNames) } librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate") diff --git a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc index e1d930e1fbf..e84f7246933 100644 --- a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc +++ b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc @@ -2,10 +2,30 @@ beta[] -This is the `process` dataset of the system module. +This is the `process` dataset of the system module. It generates an event when +a process starts and stops. It is implemented for Linux, macOS (Darwin), and Windows. +[float] +=== Configuration options + +*`process.state.period`*:: The interval at which the dataset sends full state +information. If set this will take precedence over `state.period`. The default +value is `12h`. + +*`process.hash.max_file_size`*:: The maximum size of a file in bytes for which +{beatname_uc} will compute hashes. Files larger than this size will not be +hashed. The default value is 100 MiB. For convenience units can be specified as +a suffix to the value. 
The supported units are `b` (default), `kib`, `kb`, +`mib`, `mb`, `gib`, `gb`, `tib`, `tb`, `pib`, `pb`, `eib`, and `eb`. + +*`process.hash.hash_types`*:: A list of hash types to compute when the file +changes. The supported hash types are `blake2b_256`, `blake2b_384`, +`blake2b_512`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, +`sha512_224`, `sha512_256`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, and +`xxh64`. The default value is `sha1`. + [float] ==== Example dashboard diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 64d1a3b589b..3882ba19712 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -18,6 +18,9 @@ - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] +- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] +- Fix issue with named pipes on Windows 7 {pull}21931[21931] +- Fix missing elastic_agent event data {pull}21994[21994] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/application/local_mode.go b/x-pack/elastic-agent/pkg/agent/application/local_mode.go index b58e260cab6..f0c4153f474 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_mode.go @@ -100,7 +100,7 @@ func newLocal( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(localApplication.bgContext, cfg.Settings, localApplication.srv, reporter, monitor)) + router, err := newRouter(log, streamFactory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go 
b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go index e38685741c3..fa31215f75d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go @@ -154,7 +154,7 @@ func newManaged( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(managedApplication.bgContext, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) + router, err := newRouter(log, streamFactory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go index 3fc49ef17d3..920b1a4b5bf 100644 --- a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go +++ b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go @@ -94,7 +94,6 @@ func getMonitoringRule(outputName string) *transpiler.RuleList { return transpiler.NewRuleList( transpiler.Copy(monitoringOutputSelector, outputKey), transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), elasticsearchKey), - transpiler.InjectAgentInfo(), transpiler.Filter(monitoringKey, programsKey, outputKey), ) } diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 41999fcb832..2d372ef4387 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -7,6 +7,7 @@ package application import ( "context" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -40,10 +41,10 @@ func (b *operatorStream) Shutdown() { b.configHandler.Shutdown() } -func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { +func streamFactory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { return func(log *logger.Logger, id routingKey) (stream, error) { // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, id, cfg, srv, r, m) + operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m) if err != nil { return nil, err } @@ -55,10 +56,10 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } } -func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { - fetcher := downloader.NewDownloader(log, config.DownloadConfig) +func newOperator(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { + fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) if err != nil { return nil, errors.New(err, "initiating verifier") } @@ -81,6 +82,7 @@ func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config return operation.NewOperator( ctx, log, + agentInfo, id, config, fetcher, 
diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go index 3aea96da0ab..0294308ff3a 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go @@ -27,12 +27,12 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp, true) if err != nil { return "", errors.New(err, "initiating verifier") } - fetcher := downloader.NewDownloader(u.log, &settings) + fetcher := downloader.NewDownloader(u.log, &settings, true) path, err := fetcher.Download(ctx, agentName, agentArtifactName, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go index 1a21bc154a1..d7e69fc3972 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go @@ -183,9 +183,6 @@ func (u *Upgrader) Ack(ctx context.Context) error { } func (u *Upgrader) sourceURI(version, retrievedURI string) (string, error) { - if strings.HasSuffix(version, "-SNAPSHOT") && retrievedURI == "" { - return "", errors.New("snapshot upgrade requires source uri", errors.TypeConfig) - } if retrievedURI != "" { return retrievedURI, nil } diff --git a/x-pack/elastic-agent/pkg/agent/operation/common_test.go b/x-pack/elastic-agent/pkg/agent/operation/common_test.go index e9d40bece87..ea16cfe77b8 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/common_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/common_test.go @@ -13,6 +13,7 @@ import ( "time" 
"github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -48,6 +49,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a } l := getLogger() + agentInfo, _ := info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -67,7 +69,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a t.Fatal(err) } - operator, err := NewOperator(context.Background(), l, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) + operator, err := NewOperator(context.Background(), l, agentInfo, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index c4d895eb6ee..1959cd52818 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -186,14 +186,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "paths": []string{ filepath.Join(paths.Home(), "logs", "elastic-agent-json.log"), }, - "index": "logs-elastic.agent-default", + "index": "logs-elastic_agent-default", "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "logs", - "dataset": "elastic.agent", + "dataset": "elastic_agent", "namespace": "default", }, }, @@ -202,7 +202,17 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "add_fields": map[string]interface{}{ "target": "event", 
"fields": map[string]interface{}{ - "dataset": "elastic.agent", + "dataset": "elastic_agent", + }, + }, + }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), }, }, }, @@ -220,14 +230,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "message_key": "message", }, "paths": paths, - "index": fmt.Sprintf("logs-elastic.agent.%s-default", name), + "index": fmt.Sprintf("logs-elastic_agent.%s-default", name), "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "logs", - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), "namespace": "default", }, }, @@ -236,7 +246,17 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), + }, + }, + }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), }, }, }, @@ -270,14 +290,14 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "metricsets": []string{"stats", "state"}, "period": "10s", "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic.agent.%s-default", name), + "index": fmt.Sprintf("metrics-elastic_agent.%s-default", name), "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "metrics", - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", 
name), "namespace": "default", }, }, @@ -286,7 +306,17 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), + }, + }, + }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), }, }, }, diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go index eef904096f7..3ca6a5f6b14 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -112,6 +113,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } l := getLogger() + agentInfo, _ := info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -128,7 +130,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } ctx := context.Background() - operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) + operator, err := NewOperator(ctx, l, agentInfo, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/operator.go 
b/x-pack/elastic-agent/pkg/agent/operation/operator.go index b4938278821..1a39e73500e 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/operator.go +++ b/x-pack/elastic-agent/pkg/agent/operation/operator.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -43,6 +44,7 @@ type Operator struct { bgContext context.Context pipelineID string logger *logger.Logger + agentInfo *info.AgentInfo config *configuration.SettingsConfig handlers map[string]handleFunc stateResolver *stateresolver.StateResolver @@ -66,6 +68,7 @@ type Operator struct { func NewOperator( ctx context.Context, logger *logger.Logger, + agentInfo *info.AgentInfo, pipelineID string, config *configuration.SettingsConfig, fetcher download.Downloader, @@ -85,6 +88,7 @@ func NewOperator( config: config, pipelineID: pipelineID, logger: logger, + agentInfo: agentInfo, downloader: fetcher, verifier: verifier, installer: installer, diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml index 38b251d95dc..82a47adc999 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml @@ -17,11 +17,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: enabled: true diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml 
b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml index 6e768db6aa4..1406a2dff65 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml @@ -18,11 +18,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml index 01ee955e4ec..524d6451f28 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml @@ -19,11 +19,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - type: log paths: - /var/log/hello3.log @@ -43,11 +43,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml index d09e80accf1..2889e7605eb 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -16,11 +16,11 @@ metricbeat: fields: dataset: docker.status - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - 
agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: docker metricsets: [info] index: metrics-generic-default @@ -37,11 +37,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: apache metricsets: [info] index: metrics-generic-testing @@ -61,11 +61,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: [127.0.0.1:9200, 127.0.0.1:9300] diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go index 29ff1786d1e..42acd53d21a 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go @@ -715,11 +715,11 @@ func (r *InjectAgentInfoRule) Apply(agentInfo AgentInfo, ast *AST) error { // elastic.agent processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic"}}) + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic_agent"}}) processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "agent.id", value: &StrVal{value: agentInfo.AgentID()}}, - &Key{name: "agent.version", value: &StrVal{value: agentInfo.Version()}}, - &Key{name: "agent.snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, + &Key{name: "id", value: &StrVal{value: agentInfo.AgentID()}}, + &Key{name: "version", value: &StrVal{value: agentInfo.Version()}}, + &Key{name: "snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, }}}) addFieldsMap := &Dict{value: 
[]Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy("").InjectItem(processorsList.value, addFieldsMap) diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go index d92ba0de985..0fb59107844 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go @@ -184,11 +184,11 @@ inputs: type: file processors: - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 - name: With processors type: file processors: @@ -197,11 +197,11 @@ inputs: fields: data: more - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 `, rule: &RuleList{ Rules: []Rule{ diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go index 6448af25aca..ba82195ffbd 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go @@ -17,12 +17,12 @@ import ( // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
-func NewDownloader(log *logger.Logger, config *artifact.Config) download.Downloader { +func NewDownloader(log *logger.Logger, config *artifact.Config, forceSnapshot bool) download.Downloader { downloaders := make([]download.Downloader, 0, 3) downloaders = append(downloaders, fs.NewDownloader(config)) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapDownloader, err := snapshot.NewDownloader(config) if err != nil { log.Error(err) diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go index 4f33cbbdb8e..30517d12d3d 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go @@ -17,7 +17,7 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte, forceSnapshot bool) (download.Verifier, error) { verifiers := make([]download.Verifier, 0, 3) fsVer, err := fs.NewVerifier(config, allowEmptyPgp, pgp) @@ -27,7 +27,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool verifiers = append(verifiers, fsVer) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapshotVerifier, err := snapshot.NewVerifier(config, allowEmptyPgp, pgp) if err != nil { log.Error(err) diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 5cbe4685cb8..3ea37b3c754 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -38,24 +38,32 @@ The `s3` input supports 
the following configuration options plus the <<{beatname_lc}-input-{type}-common-options>> described later. [float] -==== `queue_url` - -URL of the AWS SQS queue that messages will be received from. Required. - -[float] -==== `fips_enabled` - -Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. +==== `api_timeout` -[float] -==== `visibility_timeout` +The maximum duration of the AWS API call. If it exceeds the timeout, the AWS API +call will be interrupted. +The default AWS API call timeout for a message is 120 seconds. The minimum +is 0 seconds. The maximum is half of the visibility timeout value. -The duration that the received messages are hidden from subsequent -retrieve requests after being retrieved by a ReceiveMessage request. -This value needs to be a lot bigger than {beatname_uc} collection frequency so -if it took too long to read the s3 log, this sqs message will not be reprocessed. -The default visibility timeout for a message is 300 seconds. The minimum -is 0 seconds. The maximum is 12 hours. +["source","json"] +---- +{ + "Records": [ + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:51:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", + }, + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:52:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", + } + ] +} +---- [float] ==== `expand_event_list_from_field` @@ -93,40 +101,33 @@ file_selectors: - regex: '^AWSLogs/\d+/CloudTrail/' expand_event_list_from_field: 'Records' - regex: '^AWSLogs/\d+/CloudTrail-Digest' -``` ---- +[float] +==== `fips_enabled` + +Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. 
[float] -==== `api_timeout` +==== `max_number_of_messages` +The maximum number of messages to return. Amazon SQS never returns more messages +than this value (however, fewer messages might be returned). +Valid values: 1 to 10. Default: 5. -The maximum duration of AWS API can take. If it exceeds the timeout, AWS API -will be interrupted. -The default AWS API timeout for a message is 120 seconds. The minimum -is 0 seconds. The maximum is half of the visibility timeout value. +[float] +==== `queue_url` -["source","json"] ----- -{ - "Records": [ - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:51:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", - ... - }, - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:52:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", - ... - } - ] -} -``` ----- +URL of the AWS SQS queue that messages will be received from. Required. + +[float] +==== `visibility_timeout` + +The duration that the received messages are hidden from subsequent +retrieve requests after being retrieved by a ReceiveMessage request. +This value needs to be a lot bigger than {beatname_uc} collection frequency so +if it took too long to read the s3 log, this sqs message will not be reprocessed. +The default visibility timeout for a message is 300 seconds. The minimum +is 0 seconds. The maximum is 12 hours. [float] ==== `aws credentials` diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go index 1b890513284..c3d3114c723 100644 --- a/x-pack/filebeat/input/s3/collector.go +++ b/x-pack/filebeat/input/s3/collector.go @@ -82,17 +82,11 @@ type s3Context struct { errC chan error } -var ( - // The maximum number of messages to return. Amazon SQS never returns more messages - // than this value (however, fewer messages might be returned). 
- maxNumberOfMessage uint8 = 10 - - // The duration (in seconds) for which the call waits for a message to arrive - // in the queue before returning. If a message is available, the call returns - // sooner than WaitTimeSeconds. If no messages are available and the wait time - // expires, the call returns successfully with an empty list of messages. - waitTimeSecond uint8 = 10 -) +// The duration (in seconds) for which the call waits for a message to arrive +// in the queue before returning. If a message is available, the call returns +// sooner than WaitTimeSeconds. If no messages are available and the wait time +// expires, the call returns successfully with an empty list of messages. +var waitTimeSecond uint8 = 10 func (c *s3Collector) run() { defer c.logger.Info("s3 input worker has stopped.") @@ -205,7 +199,7 @@ func (c *s3Collector) receiveMessage(svcSQS sqsiface.ClientAPI, visibilityTimeou &sqs.ReceiveMessageInput{ QueueUrl: &c.config.QueueURL, MessageAttributeNames: []string{"All"}, - MaxNumberOfMessages: awssdk.Int64(int64(maxNumberOfMessage)), + MaxNumberOfMessages: awssdk.Int64(int64(c.config.MaxNumberOfMessages)), VisibilityTimeout: &visibilityTimeout, WaitTimeSeconds: awssdk.Int64(int64(waitTimeSecond)), }) diff --git a/x-pack/filebeat/input/s3/config.go b/x-pack/filebeat/input/s3/config.go index cc3c5318289..6dc0746ce5f 100644 --- a/x-pack/filebeat/input/s3/config.go +++ b/x-pack/filebeat/input/s3/config.go @@ -13,13 +13,14 @@ import ( ) type config struct { + APITimeout time.Duration `config:"api_timeout"` + ExpandEventListFromField string `config:"expand_event_list_from_field"` + FileSelectors []FileSelectorCfg `config:"file_selectors"` + FipsEnabled bool `config:"fips_enabled"` + MaxNumberOfMessages int `config:"max_number_of_messages"` QueueURL string `config:"queue_url" validate:"nonzero,required"` VisibilityTimeout time.Duration `config:"visibility_timeout"` - FipsEnabled bool `config:"fips_enabled"` AwsConfig awscommon.ConfigAWS 
`config:",inline"` - ExpandEventListFromField string `config:"expand_event_list_from_field"` - APITimeout time.Duration `config:"api_timeout"` - FileSelectors []FileSelectorCfg `config:"file_selectors"` } // FileSelectorCfg defines type and configuration of FileSelectors @@ -31,9 +32,10 @@ type FileSelectorCfg struct { func defaultConfig() config { return config{ - VisibilityTimeout: 300 * time.Second, - APITimeout: 120 * time.Second, - FipsEnabled: false, + APITimeout: 120 * time.Second, + FipsEnabled: false, + MaxNumberOfMessages: 5, + VisibilityTimeout: 300 * time.Second, } } @@ -42,10 +44,12 @@ func (c *config) Validate() error { return fmt.Errorf("visibility timeout %v is not within the "+ "required range 0s to 12h", c.VisibilityTimeout) } + if c.APITimeout < 0 || c.APITimeout > c.VisibilityTimeout/2 { return fmt.Errorf("api timeout %v needs to be larger than"+ " 0s and smaller than half of the visibility timeout", c.APITimeout) } + for i := range c.FileSelectors { r, err := regexp.Compile(c.FileSelectors[i].RegexString) if err != nil { @@ -53,5 +57,9 @@ func (c *config) Validate() error { } c.FileSelectors[i].Regex = r } + + if c.MaxNumberOfMessages > 10 || c.MaxNumberOfMessages < 1 { + return fmt.Errorf(" max_number_of_messages %v needs to be between 1 and 10", c.MaxNumberOfMessages) + } return nil } diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go index d76e5b8b728..36f160d759e 100644 --- a/x-pack/filebeat/input/s3/input.go +++ b/x-pack/filebeat/input/s3/input.go @@ -106,7 +106,7 @@ func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3C } log.Debug("s3 service name = ", s3Servicename) - + log.Debug("s3 input config max_number_of_messages = ", in.config.MaxNumberOfMessages) return &s3Collector{ cancellation: ctxtool.FromCanceller(ctx.Cancelation), logger: log, diff --git a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc index 
ecd8e0d3e81..385206f03ff 100644 --- a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc @@ -65,6 +65,18 @@ A list of tags to include in events. Including `forwarded` indicates that the events did not originate on this host and causes `host.name` to not be added to events. Defaults to `[checkpoint-firewall, forwarded]`. +*`var.ssl`*:: + +The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS. +```yaml +ssl: + enabled: true + certificate_authorities: ["my-ca.pem"] + certificate: "filebeat-cert.pem" + key: "filebeat-key.pem" + client_authentication: "required" +``` + [float] ==== Check Point devices diff --git a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml index 4892400a8b9..9ac586c6b5c 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml @@ -1,4 +1,10 @@ -{{ if eq .input "syslog" }} +{{ if .ssl }} + +type: tcp +host: "{{.syslog_host}}:{{.syslog_port}}" +ssl: {{ .ssl | tojson }} + +{{ else if eq .input "syslog" }} type: udp host: "{{.syslog_host}}:{{.syslog_port}}" diff --git a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml index 849c20fafe2..69301541669 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml @@ -9,6 +9,7 @@ var: default: 9001 - name: input default: syslog + - name: ssl ingest_pipeline: - ingest/pipeline.yml diff --git a/x-pack/metricbeat/module/azure/storage/storage.go b/x-pack/metricbeat/module/azure/storage/storage.go index 9f54871b319..4178b911d11 100644 --- a/x-pack/metricbeat/module/azure/storage/storage.go +++ b/x-pack/metricbeat/module/azure/storage/storage.go @@ -41,6 +41,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { 
if err != nil { return nil, err } + // set default resource type to indicate this is not the generic monitor metricset + ms.Client.Config.DefaultResourceType = defaultStorageAccountNamespace // if no options are entered we will retrieve all the vm's from the entire subscription if len(ms.Client.Config.Resources) == 0 { ms.Client.Config.Resources = []azure.ResourceConfig{ diff --git a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc index f3b1825a9b1..39eb93b4095 100644 --- a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc @@ -1,7 +1,7 @@ This is the Istio module. This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. -where the Istio module collects metrics from the +versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio +module collects metrics from the Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. For versions after `1.5`, `istiod` and `proxy` metricsets can be used.