diff --git a/filebeat/fileset/fileset.go b/filebeat/fileset/fileset.go
index 6b0dc51fed94..e4f50407ac28 100644
--- a/filebeat/fileset/fileset.go
+++ b/filebeat/fileset/fileset.go
@@ -472,7 +472,7 @@ func (fs *Fileset) GetMLConfigs() []mlimporter.MLConfig {
 	var mlConfigs []mlimporter.MLConfig
 	for _, ml := range fs.manifest.MachineLearning {
 		mlConfigs = append(mlConfigs, mlimporter.MLConfig{
-			ID:           fmt.Sprintf("filebeat-%s-%s-%s", fs.mcfg.Module, fs.name, ml.Name),
+			ID:           fmt.Sprintf("filebeat-%s-%s-%s_ecs", fs.mcfg.Module, fs.name, ml.Name),
 			JobPath:      filepath.Join(fs.modulePath, fs.name, ml.Job),
 			DatafeedPath: filepath.Join(fs.modulePath, fs.name, ml.Datafeed),
 			MinVersion:   ml.MinVersion,
diff --git a/filebeat/fileset/modules.go b/filebeat/fileset/modules.go
index fabdf00ee037..b487461c6557 100644
--- a/filebeat/fileset/modules.go
+++ b/filebeat/fileset/modules.go
@@ -36,8 +36,8 @@ import (
 )
 
 var availableMLModules = map[string]string{
-	"apache2": "access",
-	"nginx":   "access",
+	"apache": "access",
+	"nginx":  "access",
 }
 
 type ModuleRegistry struct {
@@ -427,6 +427,11 @@ func (reg *ModuleRegistry) SetupML(esClient PipelineLoader, kibanaClient *kibana
 	}
 
 	for module, fileset := range modules {
+		// XXX workaround to setup modules after changing the module IDs due to ECS migration
+		// the proper solution would be to query available modules, and setup the required ones
+		// related issue: https://github.com/elastic/kibana/issues/30934
+		module = module + "_ecs"
+
 		prefix := fmt.Sprintf("filebeat-%s-%s-", module, fileset)
 		err := mlimporter.SetupModule(kibanaClient, module, prefix)
 		if err != nil {
diff --git a/filebeat/tests/system/test_ml.py b/filebeat/tests/system/test_ml.py
index e182318d8b30..0e3e11b679b1 100644
--- a/filebeat/tests/system/test_ml.py
+++ b/filebeat/tests/system/test_ml.py
@@ -119,11 +119,11 @@ def _run_ml_test(self, modules_flag):
                               bufsize=0)
 
         # Check result
-        self.wait_until(lambda: "filebeat-nginx-access-response_code" in
+        self.wait_until(lambda: "filebeat-nginx_ecs-access-status_code_rate_ecs" in
                         (df["job_id"] for df in self.es.transport.perform_request(
                             "GET", ml_anomaly_detectors_url)["jobs"]),
                         max_timeout=60)
-        self.wait_until(lambda: "datafeed-filebeat-nginx-access-response_code" in
+        self.wait_until(lambda: "datafeed-filebeat-nginx_ecs-access-status_code_rate_ecs" in
                         (df["datafeed_id"] for df in
                          self.es.transport.perform_request("GET", ml_datafeeds_url)["datafeeds"]))
         beat.kill()