diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 8e5cd3497e1b..a44e4ea37f9a 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -157,6 +157,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
 
 *Metricbeat*
 
+- Fix `namespace` filter option at Kubernetes provider level. {pull}39881[39881]
 - Fix Azure Monitor 429 error by causing metricbeat to retry the request again. {pull}38294[38294]
 - Fix fields not being parsed correctly in postgresql/database {issue}25301[25301] {pull}37720[37720]
 - rabbitmq/queue - Change the mapping type of `rabbitmq.queue.consumers.utilisation.pct` to `scaled_float` from `long` because the values fall within the range of `[0.0, 1.0]`. Previously, conversion to integer resulted in reporting either `0` or `1`.
diff --git a/libbeat/autodiscover/providers/kubernetes/pod.go b/libbeat/autodiscover/providers/kubernetes/pod.go
index b11faac4931d..c5f9c721eb90 100644
--- a/libbeat/autodiscover/providers/kubernetes/pod.go
+++ b/libbeat/autodiscover/providers/kubernetes/pod.go
@@ -100,9 +100,9 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu
 
 	if metaConf.Node.Enabled() || config.Hints.Enabled() {
 		options := kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
-			Node:        config.Node,
-			Namespace:   config.Namespace,
+			SyncTimeout:  config.SyncPeriod,
+			Node:         config.Node,
+			HonorReSyncs: true,
 		}
 		nodeWatcher, err = kubernetes.NewNamedWatcher("node", client, &kubernetes.Node{}, options, nil)
 		if err != nil {
@@ -112,20 +112,24 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu
 
 	if metaConf.Namespace.Enabled() || config.Hints.Enabled() {
 		namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			logger.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Namespace{}, err)
 		}
 	}
 
-	// Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to
+	// Resource is Pod, so we need to create watchers for Replicasets and Jobs that it might belong to
 	// in order to be able to retrieve 2nd layer Owner metadata like in case of:
 	// Deployment -> Replicaset -> Pod
 	// CronJob -> job -> Pod
 	if metaConf.Deployment {
 		replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err)
@@ -133,7 +137,9 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu
 	}
 	if metaConf.CronJob {
 		jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			logger.Errorf("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err)
diff --git a/libbeat/autodiscover/providers/kubernetes/service.go b/libbeat/autodiscover/providers/kubernetes/service.go
index ba62dda9c47c..e9e71c921bd8 100644
--- a/libbeat/autodiscover/providers/kubernetes/service.go
+++ b/libbeat/autodiscover/providers/kubernetes/service.go
@@ -74,8 +74,9 @@ func NewServiceEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publis
 
 	if metaConf.Namespace.Enabled() || config.Hints.Enabled() {
 		namespaceWatcher, err = kubernetes.NewNamedWatcher("namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{
-			SyncTimeout: config.SyncPeriod,
-			Namespace:   config.Namespace,
+			SyncTimeout:  config.SyncPeriod,
+			Namespace:    config.Namespace,
+			HonorReSyncs: true,
 		}, nil)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't create watcher for %T due to error %w", &kubernetes.Namespace{}, err)
diff --git a/libbeat/docs/shared-autodiscover.asciidoc b/libbeat/docs/shared-autodiscover.asciidoc
index 1e1ea567b7cc..83d44b498ac2 100644
--- a/libbeat/docs/shared-autodiscover.asciidoc
+++ b/libbeat/docs/shared-autodiscover.asciidoc
@@ -140,10 +140,7 @@ The `kubernetes` autodiscover provider has the following configuration settings:
 `node`::       (Optional) Specify the node to scope {beatname_lc} to in case it
                cannot be accurately detected, as when running {beatname_lc}
                in host network mode.
-`namespace`::  (Optional) Select the namespace from which to collect the
-               metadata. If it is not set, the processor collects metadata from all
-               namespaces. It is unset by default. The namespace configuration only applies to
-               kubernetes resources that are namespace scoped.
+`namespace`::  (Optional) Select the namespace from which to collect the events of the resources. If it is not set, the provider collects events from all namespaces. It is unset by default. The namespace configuration only applies to Kubernetes resources that are namespace scoped and when `unique` is set to `false`.
 `cleanup_timeout`:: (Optional) Specify the time of inactivity before stopping the
                running configuration for a container,
 ifeval::["{beatname_lc}"=="filebeat"]
@@ -196,7 +193,7 @@ Example:
 
 `unique`::     (Optional) Defaults to `false`. Marking an autodiscover provider as unique results into
                making the provider to enable the provided templates only when it will gain the leader lease.
-               This setting can only be combined with `cluster` scope. When `unique` is enabled enabled, `resource`
+               This setting can only be combined with `cluster` scope. When `unique` is enabled, `resource`
                and `add_resource_metadata` settings are not taken into account.
 `leader_lease`:: (Optional) Defaults to +{beatname_lc}-cluster-leader+. This will be name of the lock lease.
                One can monitor the status of the lease with `kubectl describe lease beats-cluster-leader`.
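
For reference, a minimal sketch of an autodiscover configuration that exercises the `namespace` filter documented above. The Metricbeat context, the `my-namespace` value, and the explicit `unique: false` (its default) are illustrative assumptions, not part of this change:

    metricbeat.autodiscover:
      providers:
        - type: kubernetes
          node: ${NODE_NAME}
          # Watch resources, and therefore emit events, only from this namespace.
          namespace: my-namespace
          hints.enabled: true
          # Default value shown for clarity; the namespace filter only applies when unique is false.
          unique: false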