diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java
index c6d5cd91e7ecb..09cdc8b269ad3 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java
@@ -159,18 +159,18 @@ public void setup() {
     @Benchmark
     @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH)
     public void adaptive() {
-        MultivalueDedupe.dedupeToBlockAdaptive(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close();
+        MultivalueDedupe.dedupeToBlockAdaptive(block, BlockFactory.getNonBreakingInstance()).close();
     }
 
     @Benchmark
     @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH)
     public void copyAndSort() {
-        MultivalueDedupe.dedupeToBlockUsingCopyAndSort(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close();
+        MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, BlockFactory.getNonBreakingInstance()).close();
     }
 
     @Benchmark
     @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH)
     public void copyMissing() {
-        MultivalueDedupe.dedupeToBlockUsingCopyMissing(Block.Ref.floating(block), BlockFactory.getNonBreakingInstance()).close();
+        MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, BlockFactory.getNonBreakingInstance()).close();
     }
 }
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java
index 40edc0b8b9b7f..4f8d4018ffdac 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java
@@ -238,7 +238,9 @@ public void benchmark() {
         ValuesSourceReaderOperator op = new ValuesSourceReaderOperator(
             BlockFactory.getNonBreakingInstance(),
             fields(name),
-            List.of(reader),
+            List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> {
+                throw new UnsupportedOperationException("can't load _source here");
+            })),
             0
         );
         long sum = 0;
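Editorial aside, not part of the patch: the benchmark change above reflects ValuesSourceReaderOperator now taking a per-shard context that pairs the reader with a lazy _source loader, so callers that never read _source can pass a supplier that fails loudly. A minimal sketch of that supplier pattern, with hypothetical names (ShardLikeContext is not the real class):

import java.util.function.Supplier;

// Hypothetical stand-in for ValuesSourceReaderOperator.ShardContext: a resource
// paired with a supplier that is only invoked when the optional part is needed.
record ShardLikeContext<R, S>(R reader, Supplier<S> sourceLoader) {

    S loadSource() {
        // Throws only if a caller that opted out of _source actually asks for it.
        return sourceLoader.get();
    }

    static <R, S> ShardLikeContext<R, S> withoutSource(R reader) {
        return new ShardLikeContext<>(reader, () -> {
            throw new UnsupportedOperationException("can't load _source here");
        });
    }
}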
diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle
index c134638bcd6b6..66001e66f2486 100644
--- a/build-tools-internal/build.gradle
+++ b/build-tools-internal/build.gradle
@@ -35,6 +35,10 @@ gradlePlugin {
       id = 'elasticsearch.build'
       implementationClass = 'org.elasticsearch.gradle.internal.BuildPlugin'
     }
+    buildComplete {
+      id = 'elasticsearch.build-complete'
+      implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchBuildCompletePlugin'
+    }
     distro {
       id = 'elasticsearch.distro'
       implementationClass = 'org.elasticsearch.gradle.internal.distribution.ElasticsearchDistributionPlugin'
@@ -158,7 +162,7 @@ gradlePlugin {
     stringTemplate {
       id = 'elasticsearch.string-templates'
       implementationClass = 'org.elasticsearch.gradle.internal.StringTemplatePlugin'
-    }
+    }
     testFixtures {
       id = 'elasticsearch.test.fixtures'
       implementationClass = 'org.elasticsearch.gradle.internal.testfixtures.TestFixturesPlugin'
@@ -266,6 +270,8 @@ dependencies {
   api buildLibs.apache.rat
   api buildLibs.jna
   api buildLibs.shadow.plugin
+  api buildLibs.gradle.enterprise
+
   // for our ide tweaking
   api buildLibs.idea.ext
   // When upgrading forbidden apis, ensure dependency version is bumped in ThirdPartyPrecommitPlugin as well
@@ -280,6 +286,7 @@ dependencies {
   api buildLibs.asm.tree
   api buildLibs.httpclient
   api buildLibs.httpcore
+  compileOnly buildLibs.checkstyle
   runtimeOnly "org.elasticsearch.gradle:reaper:$version"
   testImplementation buildLibs.checkstyle
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle
deleted file mode 100644
index 1a0afe6d7d344..0000000000000
--- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-import org.elasticsearch.gradle.util.GradleUtils
-
-import java.nio.file.Files
-
-String buildNumber = System.getenv('BUILD_NUMBER') ?: System.getenv('BUILDKITE_BUILD_NUMBER')
-String performanceTest = System.getenv('BUILD_PERFORMANCE_TEST')
-Boolean isNested = System.getProperty("scan.tag.NESTED") != null
-
-if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(project) == false && isNested == false) {
-  def uploadFilePath = "build/${buildNumber}.tar.bz2"
-  File uploadFile = file(uploadFilePath)
-  project.gradle.buildFinished { result ->
-    println "build complete, generating: $uploadFile"
-    if (uploadFile.exists()) {
-      project.delete(uploadFile)
-    }
-
-    try {
-      ant.tar(destfile: uploadFile, compression: "bzip2", longfile: "gnu") {
-        fileset(dir: projectDir) {
-          Set fileSet = fileTree(projectDir) {
-            include("**/*.hprof")
-            include("**/build/test-results/**/*.xml")
-            include("**/build/testclusters/**")
-            include("**/build/testrun/*/temp/**")
-            include("**/build/**/hs_err_pid*.log")
-            exclude("**/build/testclusters/**/data/**")
-            exclude("**/build/testclusters/**/distro/**")
-            exclude("**/build/testclusters/**/repo/**")
-            exclude("**/build/testclusters/**/extract/**")
-            exclude("**/build/testclusters/**/tmp/**")
-            exclude("**/build/testrun/*/temp/**/data/**")
-            exclude("**/build/testrun/*/temp/**/distro/**")
-            exclude("**/build/testrun/*/temp/**/repo/**")
-            exclude("**/build/testrun/*/temp/**/extract/**")
-            exclude("**/build/testrun/*/temp/**/tmp/**")
-          }
-            .files
-            .findAll { Files.isRegularFile(it.toPath()) }
-
-          if (fileSet.empty) {
-            // In cases where we don't match any workspace files, exclude everything
-            ant.exclude(name: "**/*")
-          } else {
-            fileSet.each {
-              ant.include(name: projectDir.toPath().relativize(it.toPath()))
-            }
-          }
-        }
-
-        fileset(dir: "${gradle.gradleUserHomeDir}/daemon/${gradle.gradleVersion}", followsymlinks: false) {
-          include(name: "**/daemon-${ProcessHandle.current().pid()}*.log")
-        }
-
-        fileset(dir: "${gradle.gradleUserHomeDir}/workers", followsymlinks: false)
-
-        fileset(dir: "${project.projectDir}/.gradle/reaper", followsymlinks: false, erroronmissingdir: false)
-      }
-    } catch (Exception e) {
-      logger.lifecycle("Failed to archive additional logs", e)
-    }
-
-    if (uploadFile.exists() && System.getenv("BUILDKITE") == "true") {
-      try {
-        println "Uploading buildkite artifact: ${uploadFilePath}..."
-        new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath)
-          .start()
-          .waitFor()
-
-        println "Generating buildscan link for artifact..."
-
-        def process = new ProcessBuilder("buildkite-agent", "artifact", "search", uploadFilePath, "--step", System.getenv('BUILDKITE_JOB_ID'), "--format", "%i").start()
-        process.waitFor()
-        def artifactUuid = (process.text ?: "").trim()
-
-        println "Artifact UUID: ${artifactUuid}"
-        if (artifactUuid) {
-          buildScan.link 'Artifact Upload', "https://buildkite.com/organizations/elastic/pipelines/${System.getenv('BUILDKITE_PIPELINE_SLUG')}/builds/${buildNumber}/jobs/${System.getenv('BUILDKITE_JOB_ID')}/artifacts/${artifactUuid}"
-        }
-      } catch (Exception e) {
-        logger.lifecycle("Failed to upload buildkite artifact", e)
-      }
-    }
-  }
-}
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle
index f1bd3017ced68..c46da4bb0b950 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle
@@ -126,6 +126,7 @@ buildScan {
     }
 
     buildFinished { result ->
+      buildScanPublished { scan ->
         // Attach build scan link as build metadata
         // See: https://buildkite.com/docs/pipelines/build-meta-data
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
new file mode 100644
index 0000000000000..4902168d9b4ff
--- /dev/null
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.gradle.internal;
+
+import com.gradle.scan.plugin.BuildScanExtension;
+
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
+import org.apache.commons.io.IOUtils;
+import org.elasticsearch.gradle.util.GradleUtils;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.file.FileSystemOperations;
+import org.gradle.api.flow.FlowAction;
+import org.gradle.api.flow.FlowParameters;
+import org.gradle.api.flow.FlowProviders;
+import org.gradle.api.flow.FlowScope;
+import org.gradle.api.internal.file.FileOperations;
+import org.gradle.api.provider.ListProperty;
+import org.gradle.api.provider.Property;
+import org.gradle.api.tasks.Input;
+import org.jetbrains.annotations.NotNull;
+
+import java.io.*;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.inject.Inject;
+
+public abstract class ElasticsearchBuildCompletePlugin implements Plugin<Project> {
+
+    @Inject
+    protected abstract FlowScope getFlowScope();
+
+    @Inject
+    protected abstract FlowProviders getFlowProviders();
+
+    @Inject
+    protected abstract FileOperations getFileOperations();
+
+    @Override
+    public void apply(Project target) {
+        String buildNumber = System.getenv("BUILD_NUMBER") != null
+            ? System.getenv("BUILD_NUMBER")
System.getenv("BUILD_NUMBER") + : System.getenv("BUILDKITE_BUILD_NUMBER"); + String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); + File projectDir = target.getProjectDir(); + File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); + BuildScanExtension extension = target.getExtensions().getByType(BuildScanExtension.class); + File daemonsLogDir = new File(target.getGradle().getGradleUserHomeDir(), "daemon/" + target.getGradle().getGradleVersion()); + + getFlowScope().always(BuildFinishedFlowAction.class, spec -> { + spec.getParameters().getBuildScan().set(extension); + spec.getParameters().getUploadFile().set(targetFile); + spec.getParameters().getProjectDir().set(projectDir); + spec.getParameters().getFilteredFiles().addAll(getFlowProviders().getBuildWorkResult().map((result) -> { + System.out.println("Build Finished Action: Collecting archive files..."); + List files = new ArrayList<>(); + files.addAll(resolveProjectLogs(projectDir)); + if (files.isEmpty() == false) { + files.addAll(resolveDaemonLogs(daemonsLogDir)); + files.addAll(getFileOperations().fileTree(gradleWorkersDir).getFiles()); + files.addAll(getFileOperations().fileTree(new File(projectDir, ".gradle/reaper/")).getFiles()); + } + return files; + })); + }); + } + } + + private List resolveProjectLogs(File projectDir) { + var projectDirFiles = getFileOperations().fileTree(projectDir); + projectDirFiles.include("**/*.hprof"); + projectDirFiles.include("**/build/test-results/**/*.xml"); + projectDirFiles.include("**/build/testclusters/**"); + projectDirFiles.include("**/build/testrun/*/temp/**"); + projectDirFiles.include("**/build/**/hs_err_pid*.log"); + projectDirFiles.exclude("**/build/testclusters/**/data/**"); + projectDirFiles.exclude("**/build/testclusters/**/distro/**"); + projectDirFiles.exclude("**/build/testclusters/**/repo/**"); + projectDirFiles.exclude("**/build/testclusters/**/extract/**"); + projectDirFiles.exclude("**/build/testclusters/**/tmp/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/data/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/distro/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/repo/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/extract/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/tmp/**"); + return projectDirFiles.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + private List resolveDaemonLogs(File daemonsLogDir) { + var gradleDaemonFileSet = getFileOperations().fileTree(daemonsLogDir); + gradleDaemonFileSet.include("**/daemon-" + ProcessHandle.current().pid() + "*.log"); + return gradleDaemonFileSet.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + public abstract static class BuildFinishedFlowAction implements FlowAction { + interface Parameters extends FlowParameters { + @Input + Property getUploadFile(); + + @Input + Property getProjectDir(); + + @Input + ListProperty getFilteredFiles(); + + @Input + Property getBuildScan(); + + } + + @Inject + protected abstract FileSystemOperations getFileSystemOperations(); + + @SuppressWarnings("checkstyle:DescendantToken") + @Override + public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNotFoundException { + File uploadFile = parameters.getUploadFile().get(); + if 
+            if (uploadFile.exists()) {
+                getFileSystemOperations().delete(spec -> spec.delete(uploadFile));
+            }
+            uploadFile.getParentFile().mkdirs();
+            createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile);
+            if (uploadFile.exists() && System.getenv("BUILDKITE").equals("true")) {
+                String uploadFilePath = "build/" + uploadFile.getName();
+                try {
+                    System.out.println("Uploading buildkite artifact: " + uploadFilePath + "...");
+                    new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath).start().waitFor();
+
+                    System.out.println("Generating buildscan link for artifact...");
+
+                    Process process = new ProcessBuilder(
+                        "buildkite-agent",
+                        "artifact",
+                        "search",
+                        uploadFilePath,
+                        "--step",
+                        System.getenv("BUILDKITE_JOB_ID"),
+                        "--format",
+                        "%i"
+                    ).start();
+                    process.waitFor();
+                    String processOutput;
+                    try {
+                        processOutput = IOUtils.toString(process.getInputStream());
+                    } catch (IOException e) {
+                        processOutput = "";
+                    }
+                    String artifactUuid = processOutput.trim();
+
+                    System.out.println("Artifact UUID: " + artifactUuid);
+                    if (artifactUuid.isEmpty() == false) {
+                        String buildkitePipelineSlug = System.getenv("BUILDKITE_PIPELINE_SLUG");
+                        String targetLink = "https://buildkite.com/organizations/elastic/pipelines/"
+                            + buildkitePipelineSlug
+                            + "/builds/"
+                            + System.getenv("BUILD_NUMBER")
+                            + "/jobs/"
+                            + System.getenv("BUILDKITE_JOB_ID")
+                            + "/artifacts/"
+                            + artifactUuid;
+                        parameters.getBuildScan().get().link("Artifact Upload", targetLink);
+                    }
+                } catch (Exception e) {
+                    System.out.println("Failed to upload buildkite artifact " + e.getMessage());
+                }
+            }
+
+        }
+
+        private static void createBuildArchiveTar(List<File> files, File projectDir, File uploadFile) {
+            try (
+                OutputStream fOut = Files.newOutputStream(uploadFile.toPath());
+                BufferedOutputStream buffOut = new BufferedOutputStream(fOut);
+                BZip2CompressorOutputStream bzOut = new BZip2CompressorOutputStream(buffOut);
+                TarArchiveOutputStream tOut = new TarArchiveOutputStream(bzOut)
+            ) {
+                Path projectPath = projectDir.toPath();
+                tOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);
+                tOut.setBigNumberMode(TarArchiveOutputStream.BIGNUMBER_STAR);
+                for (Path path : files.stream().map(File::toPath).toList()) {
+                    if (!Files.isRegularFile(path)) {
+                        throw new IOException("Support only file!");
+                    }
+
+                    TarArchiveEntry tarEntry = new TarArchiveEntry(path.toFile(), calculateArchivePath(path, projectPath));
+                    tarEntry.setSize(Files.size(path));
+                    tOut.putArchiveEntry(tarEntry);
+
+                    // copy file to TarArchiveOutputStream
+                    Files.copy(path, tOut);
+                    tOut.closeArchiveEntry();
+
+                }
+                tOut.flush();
+                tOut.finish();
+
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        @NotNull
+        private static String calculateArchivePath(Path path, Path projectPath) {
+            return path.startsWith(projectPath) ? projectPath.relativize(path).toString() : path.getFileName().toString();
+        }
+    }
+}
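Editorial aside, not part of the patch: the plugin above uses Gradle's dataflow API (FlowScope/FlowAction, Gradle 8.1+), the configuration-cache-friendly replacement for the Gradle.buildFinished hook the deleted Groovy script relied on. A minimal standalone sketch of the pattern, with hypothetical class names:

package example; // hypothetical package

import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.flow.FlowAction;
import org.gradle.api.flow.FlowParameters;
import org.gradle.api.flow.FlowProviders;
import org.gradle.api.flow.FlowScope;
import org.gradle.api.provider.Property;
import org.gradle.api.tasks.Input;

import javax.inject.Inject;

// Registers an action that runs once at the end of every build, pass or fail,
// without holding a reference to the Project (which buildFinished would need).
public abstract class BuildSummaryPlugin implements Plugin<Project> {

    @Inject
    protected abstract FlowScope getFlowScope();

    @Inject
    protected abstract FlowProviders getFlowProviders();

    @Override
    public void apply(Project project) {
        getFlowScope().always(PrintSummary.class, spec ->
            // getBuildWorkResult() is a Provider realized only after the build finishes
            spec.getParameters().getFailed().set(
                getFlowProviders().getBuildWorkResult().map(result -> result.getFailure().isPresent())
            )
        );
    }

    public abstract static class PrintSummary implements FlowAction<PrintSummary.Parameters> {
        interface Parameters extends FlowParameters {
            @Input
            Property<Boolean> getFailed();
        }

        @Override
        public void execute(Parameters parameters) {
            System.out.println("Build " + (parameters.getFailed().get() ? "failed" : "succeeded"));
        }
    }
}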
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
index 7a5bead71fb0e..4f9a7284c83e1 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java
@@ -91,7 +91,6 @@ private static void disableTransitiveDependenciesForSourceSet(Project project, SourceSet sourceSet) {
         List<String> sourceSetConfigurationNames = List.of(
             sourceSet.getApiConfigurationName(),
             sourceSet.getImplementationConfigurationName(),
-            sourceSet.getImplementationConfigurationName(),
             sourceSet.getCompileOnlyConfigurationName(),
             sourceSet.getRuntimeOnlyConfigurationName()
         );
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java
index 867ccb203de0d..15a224b0ff206 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/EmptyDirTask.java
@@ -9,7 +9,7 @@
 
 import org.gradle.api.DefaultTask;
 import org.gradle.api.tasks.Input;
-import org.gradle.api.tasks.Internal;
+import org.gradle.api.tasks.OutputDirectory;
 import org.gradle.api.tasks.TaskAction;
 import org.gradle.internal.file.Chmod;
 
@@ -39,7 +39,7 @@ public Chmod getChmod() {
         throw new UnsupportedOperationException();
     }
 
-    @Internal
+    @OutputDirectory
     public File getDir() {
         return dir;
     }
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
index bb0b8dcf04437..d69a355a3595d 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java
@@ -10,6 +10,7 @@
 
 import de.thetaphi.forbiddenapis.Checker;
 import de.thetaphi.forbiddenapis.Constants;
+import de.thetaphi.forbiddenapis.ForbiddenApiException;
 import de.thetaphi.forbiddenapis.Logger;
 import de.thetaphi.forbiddenapis.ParseException;
 import groovy.lang.Closure;
@@ -43,6 +44,7 @@
 import org.gradle.api.tasks.PathSensitivity;
 import org.gradle.api.tasks.SkipWhenEmpty;
 import org.gradle.api.tasks.TaskAction;
+import org.gradle.api.tasks.VerificationException;
 import org.gradle.api.tasks.VerificationTask;
 import org.gradle.api.tasks.util.PatternFilterable;
 import org.gradle.api.tasks.util.PatternSet;
@@ -469,6 +471,8 @@ public void execute() {
             }
             checker.run();
             writeMarker(getParameters().getSuccessMarker().getAsFile().get());
+        } catch (ForbiddenApiException e) {
+            throw new VerificationException("Forbidden API verification failed", e);
         } catch (Exception e) {
             throw new RuntimeException(e);
         } finally {
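Editorial aside, not part of the patch: rethrowing the tool-specific ForbiddenApiException as org.gradle.api.tasks.VerificationException (as the hunk above does) lets Gradle treat the failure as a verification failure rather than an infrastructure error. A generic sketch of that pattern, with hypothetical task and exception names:

import org.gradle.api.DefaultTask;
import org.gradle.api.tasks.TaskAction;
import org.gradle.api.tasks.VerificationException;

// Hypothetical checker task: domain failures become VerificationExceptions,
// everything else stays a hard RuntimeException.
public abstract class ExampleCheckTask extends DefaultTask {

    @TaskAction
    public void check() {
        try {
            runChecker(); // hypothetical: throws CheckFailedException on violations
        } catch (CheckFailedException e) {
            throw new VerificationException("Check failed", e);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    private void runChecker() throws CheckFailedException { /* ... */ }

    static class CheckFailedException extends Exception {}
}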
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java
@@ -113,7 +113,7 @@ public void apply(Project project) {
         configureArtifactTransforms(project);
 
         // Create configuration for aggregating historical feature metadata
-        Configuration featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> {
+        FileCollection featureMetadataConfig = project.getConfigurations().create(FEATURES_METADATA_CONFIGURATION, c -> {
             c.setCanBeConsumed(false);
             c.setCanBeResolved(true);
             c.attributes(
@@ -127,7 +127,7 @@ public void apply(Project project) {
             });
         });
 
-        Configuration defaultDistroFeatureMetadataConfig = project.getConfigurations()
+        FileCollection defaultDistroFeatureMetadataConfig = project.getConfigurations()
             .create(DEFAULT_DISTRO_FEATURES_METADATA_CONFIGURATION, c -> {
                 c.setCanBeConsumed(false);
                 c.setCanBeResolved(true);
diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties
index a8a019c2e0132..98d3ad1eff10b 100644
--- a/build-tools-internal/version.properties
+++ b/build-tools-internal/version.properties
@@ -41,6 +41,12 @@ junit5 = 5.7.1
 hamcrest = 2.1
 mocksocket = 1.2
 
+# test container dependencies
+testcontainer = 1.19.2
+dockerJava = 3.3.4
+ductTape = 1.0.8
+commonsCompress = 1.24.0
+
 # benchmark dependencies
 jmh = 1.26
diff --git a/build.gradle b/build.gradle
index acd8d6788318f..c0b613beefea4 100644
--- a/build.gradle
+++ b/build.gradle
@@ -29,8 +29,8 @@ plugins {
   id 'lifecycle-base'
   id 'elasticsearch.docker-support'
   id 'elasticsearch.global-build-info'
-  id 'elasticsearch.build-scan'
   id 'elasticsearch.build-complete'
+  id 'elasticsearch.build-scan'
   id 'elasticsearch.jdk-download'
   id 'elasticsearch.internal-distribution-download'
   id 'elasticsearch.runtime-jdk-provision'
diff --git a/docs/changelog/100570.yaml b/docs/changelog/100570.yaml
new file mode 100644
index 0000000000000..b68a905b0e046
--- /dev/null
+++ b/docs/changelog/100570.yaml
@@ -0,0 +1,5 @@
+pr: 100570
+summary: Added metric for cache eviction of entries with non zero frequency
+area: Search
+type: enhancement
+issues: []
diff --git a/docs/changelog/102165.yaml b/docs/changelog/102165.yaml
new file mode 100644
index 0000000000000..e1c4c76f1f6ff
--- /dev/null
+++ b/docs/changelog/102165.yaml
@@ -0,0 +1,6 @@
+pr: 102165
+summary: Fix planning of duplicate aggs
+area: ES|QL
+type: bug
+issues:
+ - 102083
diff --git a/docs/changelog/102391.yaml b/docs/changelog/102391.yaml
new file mode 100644
index 0000000000000..5fcbb9e6d2858
--- /dev/null
+++ b/docs/changelog/102391.yaml
@@ -0,0 +1,5 @@
+pr: 102391
+summary: "ESQL: Support the `_source` metadata field"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/102396.yaml b/docs/changelog/102396.yaml
new file mode 100644
index 0000000000000..9ea53ca5b6840
--- /dev/null
+++ b/docs/changelog/102396.yaml
@@ -0,0 +1,5 @@
+pr: 102396
+summary: Add more logging to the real memory circuit breaker and lower minimum interval
+area: "Infra/Circuit Breakers"
+type: bug
+issues: []
diff --git a/docs/changelog/102434.yaml b/docs/changelog/102434.yaml
new file mode 100644
index 0000000000000..ab6aa886c13b1
--- /dev/null
+++ b/docs/changelog/102434.yaml
@@ -0,0 +1,5 @@
+pr: 102434
+summary: "ESQL: Short circuit loading empty doc values"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/changelog/102462.yaml b/docs/changelog/102462.yaml
new file mode 100644
index 0000000000000..d44ccc4cbbc5c
--- /dev/null
+++ b/docs/changelog/102462.yaml
@@ -0,0 +1,5 @@
+pr: 102462
+summary: Check the real memory circuit breaker when building global ordinals
+area: Aggregations
+type: enhancement
+issues: []
diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml
index 94ed94df43818..e8d94ce624dbb 100644
--- a/gradle/build.versions.toml
+++ b/gradle/build.versions.toml
@@ -17,6 +17,7 @@ commons-codec = "commons-codec:commons-codec:1.11"
 commmons-io = "commons-io:commons-io:2.2"
 docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5"
 forbiddenApis = "de.thetaphi:forbiddenapis:3.6"
+gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.14.1"
 hamcrest = "org.hamcrest:hamcrest:2.1"
 httpcore = "org.apache.httpcomponents:httpcore:4.4.12"
 httpclient = "org.apache.httpcomponents:httpclient:4.5.10"
diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml
index 246b604a6fc2b..77c39e77a88f6 100644
--- a/gradle/verification-metadata.xml
+++ b/gradle/verification-metadata.xml
@@ -381,6 +381,26 @@
+   [20 added lines: checksum entries for the new build dependencies; the XML markup did not survive extraction]
@@ -4072,6 +4092,11 @@
+   [5 added lines: checksum entries; XML elided]
@@ -4132,6 +4157,11 @@
+   [5 added lines: checksum entries; XML elided]
@@ -4162,6 +4192,11 @@
+   [5 added lines: checksum entries; XML elided]
diff --git a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java
index 7d25b5a6163c1..5153ba688d6a9 100644
--- a/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java
+++ b/libs/core/src/main/java/org/elasticsearch/core/RestApiVersion.java
@@ -18,6 +18,8 @@ public enum RestApiVersion {
 
     V_8(8),
 
+    @UpdateForV9 // v9 will not need to support the v7 REST API
     V_7(7);
 
     public final byte major;
diff --git a/modules/build.gradle b/modules/build.gradle
index ad7049a9905f0..7707b60b38b25 100644
--- a/modules/build.gradle
+++ b/modules/build.gradle
@@ -8,7 +8,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) {
   group = 'org.elasticsearch.plugin'
 
   // for modules which publish client jars
-  apply plugin: 'elasticsearch.internal-testclusters'
+  // apply plugin: 'elasticsearch.internal-testclusters'
   apply plugin: 'elasticsearch.internal-es-plugin'
   apply plugin: 'elasticsearch.internal-test-artifact'
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java
index 7242f72fe9f68..111d1b61da8c9 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/action/DataStreamLifecycleStatsResponseTests.java
@@ -8,7 +8,6 @@
 
 package org.elasticsearch.datastreams.lifecycle.action;
 
-import org.apache.lucene.tests.util.LuceneTestCase;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -26,8 +25,8 @@
 
 import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
 
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102373")
 public class DataStreamLifecycleStatsResponseTests extends AbstractWireSerializingTestCase<GetDataStreamLifecycleStatsAction.Response> {
 
     @Override
@@ -118,16 +117,27 @@ public void testXContentSerialization() throws IOException {
             }
         });
         Map<String, Object> xContentMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
-        assertThat(xContentMap.get("last_run_duration_in_millis"), is(testInstance.getRunDuration().intValue()));
-        assertThat(
-            xContentMap.get("last_run_duration"),
-            is(TimeValue.timeValueMillis(testInstance.getRunDuration()).toHumanReadableString(2))
-        );
-        assertThat(xContentMap.get("time_between_starts_in_millis"), is(testInstance.getTimeBetweenStarts().intValue()));
-        assertThat(
-            xContentMap.get("time_between_starts"),
-            is(TimeValue.timeValueMillis(testInstance.getTimeBetweenStarts()).toHumanReadableString(2))
-        );
+        if (testInstance.getRunDuration() == null) {
+            assertThat(xContentMap.get("last_run_duration_in_millis"), nullValue());
+            assertThat(xContentMap.get("last_run_duration"), nullValue());
+        } else {
+            assertThat(xContentMap.get("last_run_duration_in_millis"), is(testInstance.getRunDuration().intValue()));
+            assertThat(
+                xContentMap.get("last_run_duration"),
+                is(TimeValue.timeValueMillis(testInstance.getRunDuration()).toHumanReadableString(2))
+            );
+        }
+
+        if (testInstance.getTimeBetweenStarts() == null) {
+            assertThat(xContentMap.get("time_between_starts_in_millis"), nullValue());
+            assertThat(xContentMap.get("time_between_starts"), nullValue());
+        } else {
+            assertThat(xContentMap.get("time_between_starts_in_millis"), is(testInstance.getTimeBetweenStarts().intValue()));
+            assertThat(
+                xContentMap.get("time_between_starts"),
+                is(TimeValue.timeValueMillis(testInstance.getTimeBetweenStarts()).toHumanReadableString(2))
+            );
+        }
         assertThat(xContentMap.get("data_stream_count"), is(testInstance.getDataStreamStats().size()));
         List<Map<String, Object>> dataStreams = (List<Map<String, Object>>) xContentMap.get("data_streams");
         if (testInstance.getDataStreamStats().isEmpty()) {
diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle
index 3fb236e1d867f..645a989872f49 100644
--- a/modules/repository-s3/build.gradle
+++ b/modules/repository-s3/build.gradle
@@ -1,11 +1,7 @@
 import org.apache.tools.ant.filters.ReplaceTokens
 import org.elasticsearch.gradle.internal.info.BuildParams
-import org.elasticsearch.gradle.internal.test.RestIntegTestTask
-import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin
 import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin
 
-import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE
-
 /*
  * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
  * or more contributor license agreements. Licensed under the Elastic License
  * 2.0 and the Server Side Public License, v 1; you may not use this file except
  * in compliance with, at your election, the Elastic License 2.0 or the Server
  * Side Public License, v 1.
  */
-apply plugin: 'elasticsearch.legacy-yaml-rest-test'
+apply plugin: 'elasticsearch.internal-yaml-rest-test'
 apply plugin: 'elasticsearch.internal-cluster-test'
 
 esplugin {
@@ -46,6 +42,12 @@ dependencies {
   api 'javax.xml.bind:jaxb-api:2.2.2'
 
   testImplementation project(':test:fixtures:s3-fixture')
+  yamlRestTestImplementation project(":test:framework")
+  yamlRestTestImplementation project(':test:fixtures:s3-fixture')
+  yamlRestTestImplementation project(':test:fixtures:minio-fixture')
+  internalClusterTestImplementation project(':test:fixtures:minio-fixture')
+
+  yamlRestTestRuntimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}"
 }
 
 restResources {
@@ -83,13 +85,6 @@ tasks.named('test').configure {
 
 boolean useFixture = false
 
-def fixtureAddress = { fixture, name, port ->
-  assert useFixture: 'closure should not be used without a fixture'
-  int ephemeralPort = project(":test:fixtures:${fixture}").postProcessFixture.ext."test.fixtures.${name}.tcp.${port}"
-  assert ephemeralPort > 0
-  'http://127.0.0.1:' + ephemeralPort
-}
-
 // We test against two repositories, one which uses the usual two-part "permanent" credentials and
 // the other which uses three-part "temporary" or "session" credentials.
 
@@ -123,23 +118,13 @@ if (!s3PermanentAccessKey && !s3PermanentSecretKey && !s3PermanentBucket && !s3PermanentBasePath) {
   s3PermanentSecretKey = 's3_test_secret_key'
   s3PermanentBucket = 'bucket'
   s3PermanentBasePath = 'base_path'
-
-  apply plugin: 'elasticsearch.test.fixtures'
   useFixture = true
-
-} else if (!s3PermanentAccessKey || !s3PermanentSecretKey || !s3PermanentBucket || !s3PermanentBasePath) {
-  throw new IllegalArgumentException("not all options specified to run against external S3 service as permanent credentials are present")
 }
 
 if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3TemporaryBasePath && !s3TemporarySessionToken) {
   s3TemporaryAccessKey = 'session_token_access_key'
   s3TemporarySecretKey = 'session_token_secret_key'
   s3TemporaryBucket = 'session_token_bucket'
   s3TemporaryBasePath = 'session_token_base_path'
-  s3TemporarySessionToken = 'session_token'
-
-} else if (!s3TemporaryAccessKey || !s3TemporarySecretKey || !s3TemporaryBucket || !s3TemporaryBasePath || !s3TemporarySessionToken) {
-  throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present")
 }
 
 if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) {
@@ -147,18 +132,17 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) {
   s3EC2BasePath = 'ec2_base_path'
   s3ECSBucket = 'ecs_bucket'
   s3ECSBasePath = 'ecs_base_path'
-} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath) {
-  throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present")
 }
 
 if (!s3STSBucket && !s3STSBasePath) {
   s3STSBucket = 'sts_bucket'
   s3STSBasePath = 'sts_base_path'
-} else if (!s3STSBucket || !s3STSBasePath) {
-  throw new IllegalArgumentException("not all options specified to run STS tests are present")
 }
 
 tasks.named("processYamlRestTestResources").configure {
+  from("src/test/resources") {
+    include "aws-web-identity-token-file"
+  }
   Map expansions = [
     'permanent_bucket'      : s3PermanentBucket,
     'permanent_base_path'   : s3PermanentBasePath + "_integration_tests",
@@ -182,197 +166,36 @@ tasks.named("internalClusterTest").configure {
 }
 
 tasks.named("yamlRestTest").configure {
-  systemProperty 'tests.rest.blacklist', (
-    useFixture ?
-      ['repository_s3/50_repository_ecs_credentials/*',
-       'repository_s3/60_repository_sts_credentials/*']
-      :
-      [
-        'repository_s3/30_repository_temporary_credentials/*',
-        'repository_s3/40_repository_ec2_credentials/*',
-        'repository_s3/50_repository_ecs_credentials/*',
-        'repository_s3/60_repository_sts_credentials/*'
-      ]
-  ).join(",")
-}
-
-if (useFixture) {
-  testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture')
-  testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-session-token')
-  testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ec2')
+  systemProperty("s3PermanentAccessKey", s3PermanentAccessKey)
+  systemProperty("s3PermanentSecretKey", s3PermanentSecretKey)
+  systemProperty("s3TemporaryAccessKey", s3TemporaryAccessKey)
+  systemProperty("s3TemporarySecretKey", s3TemporarySecretKey)
+  systemProperty("s3EC2AccessKey", s3PermanentAccessKey)
 
-  normalization {
-    runtimeClasspath {
-      // ignore generated address file for the purposes of build avoidance
-      ignore 's3Fixture.address'
-    }
-  }
-}
-
-testClusters.matching { it.name == "yamlRestTest" }.configureEach {
-  keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey
-  keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey
-
-  keystore 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey
-  keystore 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey
-  keystore 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken
-
-  if (useFixture) {
-    setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture', '80')}" }, IGNORE_VALUE
-    setting 's3.client.integration_test_temporary.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-session-token', '80')}" }, IGNORE_VALUE
-    setting 's3.client.integration_test_ec2.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE
-
-    // to redirect InstanceProfileCredentialsProvider to custom auth point
-    systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE
-  } else {
-    println "Using an external service to test the repository-s3 plugin"
-  }
-}
-
-// MinIO
-if (useFixture) {
-  testFixtures.useFixture(':test:fixtures:minio-fixture', 'minio-fixture')
-
-  tasks.register("yamlRestTestMinio", RestIntegTestTask) {
-    description = "Runs REST tests using the Minio repository."
-    SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
-    SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME)
-    setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs())
-    setClasspath(yamlRestTestSourceSet.getRuntimeClasspath())
-
-    // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968
-    systemProperty 'tests.rest.blacklist', [
-      'repository_s3/30_repository_temporary_credentials/*',
-      'repository_s3/40_repository_ec2_credentials/*',
-      'repository_s3/50_repository_ecs_credentials/*',
-      'repository_s3/60_repository_sts_credentials/*'
-    ].join(",")
-  }
-  tasks.named("check").configure { dependsOn("yamlRestTestMinio") }
-
-  testClusters.matching { it.name == "yamlRestTestMinio" }.configureEach {
-    keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey
-    keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey
-    setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('minio-fixture', 'minio-fixture', '9000')}" }, IGNORE_VALUE
-    module tasks.named("explodedBundlePlugin")
-  }
-}
-
-// ECS
-if (useFixture) {
-  testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ecs')
-  tasks.register("yamlRestTestECS", RestIntegTestTask.class) {
-    description = "Runs tests using the ECS repository."
-    SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
-    SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME)
-    setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs())
-    setClasspath(yamlRestTestSourceSet.getRuntimeClasspath())
-    systemProperty 'tests.rest.blacklist', [
-      'repository_s3/10_basic/*',
-      'repository_s3/20_repository_permanent_credentials/*',
-      'repository_s3/30_repository_temporary_credentials/*',
-      'repository_s3/40_repository_ec2_credentials/*',
-      'repository_s3/60_repository_sts_credentials/*'
-    ].join(",")
-  }
-  tasks.named("check").configure { dependsOn("yamlRestTestECS") }
-
-  testClusters.matching { it.name == "yamlRestTestECS" }.configureEach {
-    setting 's3.client.integration_test_ecs.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ecs', '80')}" }, IGNORE_VALUE
-    module tasks.named('explodedBundlePlugin')
-    environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ecs', '80')}/ecs_credentials_endpoint" }, IGNORE_VALUE
-  }
-}
-
-// STS (Secure Token Service)
-if (useFixture) {
-  testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-sts')
-  tasks.register("yamlRestTestSTS", RestIntegTestTask.class) {
-    description = "Runs tests with the STS (Secure Token Service)"
-    SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
-    SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME)
-    setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs())
-    setClasspath(yamlRestTestSourceSet.getRuntimeClasspath())
-    systemProperty 'tests.rest.blacklist', [
-      'repository_s3/10_basic/*',
-      'repository_s3/20_repository_permanent_credentials/*',
-      'repository_s3/30_repository_temporary_credentials/*',
-      'repository_s3/40_repository_ec2_credentials/*',
-      'repository_s3/50_repository_ecs_credentials/*'
-    ].join(",")
-  }
-  tasks.named("check").configure { dependsOn("yamlRestTestSTS") }
-
"yamlRestTestSTS" }.configureEach { - module tasks.named("explodedBundlePlugin") - - setting 's3.client.integration_test_sts.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-sts', '80')}" }, IGNORE_VALUE - systemProperty 'com.amazonaws.sdk.stsMetadataServiceEndpointOverride', - { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-sts', '80')}/assume-role-with-web-identity" }, IGNORE_VALUE - - File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file') - // The web identity token can be read only from the plugin config directory because of security restrictions - // Ideally we would create a symlink, but extraConfigFile doesn't support it - extraConfigFile 'repository-s3/aws-web-identity-token-file', awsWebIdentityTokenExternalLocation - environment 'AWS_WEB_IDENTITY_TOKEN_FILE', "$awsWebIdentityTokenExternalLocation" - - // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to S3S in the S3HttpFixtureWithSTS fixture - environment 'AWS_ROLE_ARN', 'arn:aws:iam::123456789012:role/FederatedWebIdentityRole' - environment 'AWS_ROLE_SESSION_NAME', 'sts-fixture-test' - } -} - -// Sanity test for STS Regional Endpoints -if (useFixture) { - tasks.register("yamlRestTestRegionalSTS", RestIntegTestTask.class) { - description = "Runs tests with the Regional STS Endpoint" - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) - setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) - setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) - // Run just the basic sanity test to make sure ES starts up and loads the S3 repository with - // a regional endpoint without an error. 
-    // a regional endpoint without an error. It would be great to make actual requests against
-    // a test fixture, but setting the region means using a production endpoint
-    systemProperty 'tests.rest.blacklist', [
-      'repository_s3/20_repository_permanent_credentials/*',
-      'repository_s3/30_repository_temporary_credentials/*',
-      'repository_s3/40_repository_ec2_credentials/*',
-      'repository_s3/50_repository_ecs_credentials/*',
-      'repository_s3/60_repository_sts_credentials/*'
-    ].join(",")
-  }
-  tasks.named("check").configure { dependsOn("yamlRestTestRegionalSTS") }
-
-  testClusters.matching { it.name == "yamlRestTestRegionalSTS" }.configureEach {
-    module tasks.named("explodedBundlePlugin")
-
-    File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file')
-    extraConfigFile 'repository-s3/aws-web-identity-token-file', awsWebIdentityTokenExternalLocation
-    environment 'AWS_WEB_IDENTITY_TOKEN_FILE', "$awsWebIdentityTokenExternalLocation"
-    environment 'AWS_ROLE_ARN', 'arn:aws:iam::123456789012:role/FederatedWebIdentityRole'
-    environment 'AWS_ROLE_SESSION_NAME', 'sts-fixture-test'
-    // Force the repository to set a regional production endpoint
-    environment 'AWS_STS_REGIONAL_ENDPOINTS', 'regional'
-    environment 'AWS_REGION', 'ap-southeast-2'
-  }
+  // ideally we could resolve an env path in cluster config as resource similar to configuring a config file
+  // not sure how common this is, but it would be nice to support
+  File awsWebIdentityTokenExternalLocation = file('src/test/resources/aws-web-identity-token-file')
+  // The web identity token can be read only from the plugin config directory because of security restrictions
+  // Ideally we would create a symlink, but extraConfigFile doesn't support it
+  nonInputProperties.systemProperty("awsWebIdentityTokenExternalLocation", awsWebIdentityTokenExternalLocation.getAbsolutePath())
 }
 
 // 3rd Party Tests
-TaskProvider<Test> s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) {
+tasks.register("s3ThirdPartyTest", Test) {
   SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class);
   SourceSet internalTestSourceSet = sourceSets.getByName(InternalClusterTestPlugin.SOURCE_SET_NAME)
   setTestClassesDirs(internalTestSourceSet.getOutput().getClassesDirs())
   setClasspath(internalTestSourceSet.getRuntimeClasspath())
   include '**/S3RepositoryThirdPartyTests.class'
+  systemProperty("tests.use.fixture", Boolean.toString(useFixture))
+
+  // test container accesses ~/.testcontainers.properties read
+  systemProperty "tests.security.manager", "false"
 
   systemProperty 'test.s3.account', s3PermanentAccessKey
   systemProperty 'test.s3.key', s3PermanentSecretKey
   systemProperty 'test.s3.bucket', s3PermanentBucket
   nonInputProperties.systemProperty 'test.s3.base', s3PermanentBasePath + "_third_party_tests_" + BuildParams.testSeed
-  if (useFixture) {
-    nonInputProperties.systemProperty 'test.s3.endpoint', "${-> fixtureAddress('minio-fixture', 'minio-fixture', '9000') }"
-  }
 }
-tasks.named("check").configure { dependsOn(s3ThirdPartyTest) }
 
 tasks.named("thirdPartyAudit").configure {
   ignoreMissingClasses(
@@ -405,3 +228,8 @@ tasks.named("thirdPartyAudit").configure {
     'javax.activation.DataHandler'
   )
 }
+
+tasks.named("check").configure {
+  dependsOn(tasks.withType(Test))
+}
+
diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
index afa52dd56ea6a..18f5de496f9bb 100644
--- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
+++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java
@@ -11,6 +11,7 @@
 import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
 import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
 import com.amazonaws.services.s3.model.MultipartUpload;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -23,6 +24,7 @@
 import org.elasticsearch.common.settings.SecureSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.core.Booleans;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.plugins.Plugin;
@@ -31,8 +33,11 @@
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.test.ClusterServiceUtils;
+import org.elasticsearch.test.fixtures.minio.MinioTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.ClassRule;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -48,7 +53,12 @@
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
 
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
 public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
+    static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("tests.use.fixture", "true"));
+
+    @ClassRule
+    public static MinioTestContainer minio = new MinioTestContainer(USE_FIXTURE);
 
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
@@ -92,7 +102,7 @@ protected void createRepository(String repoName) {
         Settings.Builder settings = Settings.builder()
             .put("bucket", System.getProperty("test.s3.bucket"))
             .put("base_path", System.getProperty("test.s3.base", "testpath"));
-        final String endpoint = System.getProperty("test.s3.endpoint");
+        final String endpoint = USE_FIXTURE ? minio.getAddress() : System.getProperty("test.s3.endpoint");
         if (endpoint != null) {
             settings.put("endpoint", endpoint);
         } else {
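Editorial aside, not part of the patch: the project's MinioTestContainer wraps the Testcontainers @ClassRule pattern, replacing the old Gradle-managed fixture whose ephemeral port had to be resolved at build time. A hedged sketch of the underlying pattern using plain Testcontainers APIs (image, env vars, and credentials here are assumptions, not the project's actual fixture configuration):

import org.junit.ClassRule;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.utility.DockerImageName;

// Hypothetical test: one container is started per test class and the endpoint
// is derived from the mapped ephemeral port at runtime, not from the build.
public class MinioBackedTest {

    @ClassRule
    public static GenericContainer<?> minio = new GenericContainer<>(DockerImageName.parse("minio/minio:latest"))
        .withEnv("MINIO_ROOT_USER", "s3_test_access_key")      // assumed credentials
        .withEnv("MINIO_ROOT_PASSWORD", "s3_test_secret_key")  // assumed credentials
        .withCommand("server", "/data")
        .withExposedPorts(9000);

    static String address() {
        return "http://" + minio.getHost() + ":" + minio.getMappedPort(9000);
    }
}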
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+
+public abstract class AbstractRepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return ESClientYamlSuiteTestCase.createParameters();
+    }
+
+    public AbstractRepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+}
diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java
index 1cbdf357d821b..d4b964fb3a7f0 100644
--- a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java
+++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3ClientYamlTestSuiteIT.java
@@ -8,20 +8,63 @@
 
 package org.elasticsearch.repositories.s3;
 
+import fixture.s3.S3HttpFixture;
+import fixture.s3.S3HttpFixtureWithEC2;
+import fixture.s3.S3HttpFixtureWithSessionToken;
+
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
-import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
+public class RepositoryS3ClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT {
+
+    public static final S3HttpFixture s3Fixture = new S3HttpFixture();
+    public static final S3HttpFixtureWithSessionToken s3HttpFixtureWithSessionToken = new S3HttpFixtureWithSessionToken();
+    public static final S3HttpFixtureWithEC2 s3Ec2 = new S3HttpFixtureWithEC2();
 
-public class RepositoryS3ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
+    private static final String s3TemporarySessionToken = "session_token";
+
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("repository-s3")
+        .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey"))
+        .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey"))
+        .keystore("s3.client.integration_test_temporary.access_key", System.getProperty("s3TemporaryAccessKey"))
+        .keystore("s3.client.integration_test_temporary.secret_key", System.getProperty("s3TemporarySecretKey"))
+        .keystore("s3.client.integration_test_temporary.session_token", s3TemporarySessionToken)
+        .setting("s3.client.integration_test_permanent.endpoint", s3Fixture::getAddress)
+        .setting("s3.client.integration_test_temporary.endpoint", s3HttpFixtureWithSessionToken::getAddress)
+        .setting("s3.client.integration_test_ec2.endpoint", s3Ec2::getAddress)
.systemProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", s3Ec2::getAddress) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Ec2).around(s3HttpFixtureWithSessionToken).around(cluster); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters( + new String[] { + "repository_s3/10_basic", + "repository_s3/20_repository_permanent_credentials", + "repository_s3/30_repository_temporary_credentials", + "repository_s3/40_repository_ec2_credentials" } + ); + } public RepositoryS3ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); } } diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..e9bc9d0537cbb --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3EcsClientYamlTestSuiteIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixtureWithECS; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class RepositoryS3EcsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + private static final S3HttpFixtureWithECS s3Ecs = new S3HttpFixtureWithECS(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client.integration_test_ecs.endpoint", s3Ecs::getAddress) + .environment("AWS_CONTAINER_CREDENTIALS_FULL_URI", () -> (s3Ecs.getAddress() + "/ecs_credentials_endpoint")) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Ecs).around(cluster); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(new String[] { "repository_s3/50_repository_ecs_credentials" }); + } + + public RepositoryS3EcsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..41f9983ef26e6 --- /dev/null +++ 
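Editorial aside, not part of the patch: in the suites above the fixtures must be fully started before the cluster that points at them, which is exactly what the RuleChain encodes: the outer rule starts first and stops last. A toy illustration of that ordering with plain JUnit 4 APIs:

import org.junit.ClassRule;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

public class RuleChainOrderTest {

    static ExternalResource named(String name) {
        return new ExternalResource() {
            @Override
            protected void before() {
                System.out.println(name + " starting");
            }

            @Override
            protected void after() {
                System.out.println(name + " stopping");
            }
        };
    }

    // Prints: fixture starting, cluster starting ... cluster stopping, fixture stopping
    @ClassRule
    public static TestRule ruleChain = RuleChain.outerRule(named("fixture")).around(named("cluster"));
}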
diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000000..41f9983ef26e6
--- /dev/null
+++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3MinioClientYamlTestSuiteIT.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.repositories.s3;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.fixtures.minio.MinioTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.junit.ClassRule;
+import org.junit.rules.RuleChain;
+import org.junit.rules.TestRule;
+
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
+public class RepositoryS3MinioClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT {
+
+    public static MinioTestContainer minio = new MinioTestContainer();
+
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .module("repository-s3")
+        .keystore("s3.client.integration_test_permanent.access_key", System.getProperty("s3PermanentAccessKey"))
+        .keystore("s3.client.integration_test_permanent.secret_key", System.getProperty("s3PermanentSecretKey"))
+        .setting("s3.client.integration_test_permanent.endpoint", () -> minio.getAddress())
+        .build();
+
+    @ClassRule
+    public static TestRule ruleChain = RuleChain.outerRule(minio).around(cluster);
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return createParameters(new String[] { "repository_s3/10_basic", "repository_s3/20_repository_permanent_credentials" });
+    }
+
+    public RepositoryS3MinioClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+}
diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000000..b0a7f84c03c85
--- /dev/null
+++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3RegionalStsClientYamlTestSuiteIT.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.repositories.s3; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; + +public class RepositoryS3RegionalStsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) + // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to STS in the + // S3HttpFixtureWithSTS fixture + .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") + .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") + .environment("AWS_STS_REGIONAL_ENDPOINTS", "regional") + .environment("AWS_REGION", "ap-southeast-2") + .build(); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(new String[] { "repository_s3/10_basic" }); + } + + public RepositoryS3RegionalStsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java new file mode 100644 index 0000000000000..eb105e02353b6 --- /dev/null +++ b/modules/repository-s3/src/yamlRestTest/java/org/elasticsearch/repositories/s3/RepositoryS3StsClientYamlTestSuiteIT.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1.
+ */ + +package org.elasticsearch.repositories.s3; + +import fixture.s3.S3HttpFixture; +import fixture.s3.S3HttpFixtureWithSTS; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +public class RepositoryS3StsClientYamlTestSuiteIT extends AbstractRepositoryS3ClientYamlTestSuiteIT { + + public static final S3HttpFixture s3Fixture = new S3HttpFixture(); + private static final S3HttpFixtureWithSTS s3Sts = new S3HttpFixtureWithSTS(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("repository-s3") + .setting("s3.client.integration_test_sts.endpoint", s3Sts::getAddress) + .systemProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", () -> s3Sts.getAddress() + "/assume-role-with-web-identity") + .configFile("repository-s3/aws-web-identity-token-file", Resource.fromClasspath("aws-web-identity-token-file")) + .environment("AWS_WEB_IDENTITY_TOKEN_FILE", System.getProperty("awsWebIdentityTokenExternalLocation")) + // The AWS STS SDK requires the role and session names to be set. We can verify that they are sent to STS in the + // S3HttpFixtureWithSTS fixture + .environment("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/FederatedWebIdentityRole") + .environment("AWS_ROLE_SESSION_NAME", "sts-fixture-test") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(s3Fixture).around(s3Sts).around(cluster); + + @ParametersFactory + public static Iterable parameters() throws Exception { + return createParameters(new String[] { "repository_s3/60_repository_sts_credentials" }); + } + + public RepositoryS3StsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 06b92b8138cf7..e5bc4a729f8b1 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.transport.Compression; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -69,7 +70,6 @@ import static java.util.Collections.singletonMap; import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static
org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; @@ -1620,7 +1620,7 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets // upgraded properly. If we're already on 8.x, skip this part of the test. - if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); putAliasRequest.setJsonEntity(""" diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java index 44ee7f0b56d1c..fbd6ee8aa3759 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SystemIndicesUpgradeIT.java @@ -14,11 +14,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.XContentTestUtils.JsonMapView; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -89,7 +89,7 @@ public void testSystemIndicesUpgrades() throws Exception { // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets // upgraded properly. If we're already on 8.x, skip this part of the test. 
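+ // The SYSTEM_INDICES_REST_ACCESS_ENFORCED cluster feature, rather than a node-version comparison, now signals that system index access is already enforced.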
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly Request putAliasRequest = new Request("POST", "/_aliases"); putAliasRequest.setJsonEntity(""" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml index c7477c5b538ab..6a347df112b47 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml @@ -2,43 +2,64 @@ "Basic mlt query with docs": - do: indices.create: - index: test_1 + index: mlt_test_index - do: index: - index: test_1 + index: mlt_test_index id: "1" body: { foo: bar } - do: index: - index: test_1 + index: mlt_test_index id: "2" body: { foo: baz } - do: index: - index: test_1 + index: mlt_test_index id: "3" body: { foo: foo } - do: indices.refresh: {} + - do: + get: + index: mlt_test_index + id: "1" + + - match: { _source.foo: "bar" } + + - do: + get: + index: mlt_test_index + id: "2" + + - match: { _source.foo: "baz" } + + - do: + get: + index: mlt_test_index + id: "3" + + - match: { _source.foo: "foo" } + - do: search: rest_total_hits_as_int: true - index: test_1 + index: mlt_test_index body: query: more_like_this: like: - - _index: test_1 + _index: mlt_test_index doc: foo: bar - - _index: test_1 + _index: mlt_test_index _id: "2" - _id: "3" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml new file mode 100644 index 0000000000000..f44e48185d363 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/25_docs_one_shard.yml @@ -0,0 +1,75 @@ +--- +"Basic mlt query with docs - explicitly on same shard": + - do: + indices.create: + index: mlt_one_shard_test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + index: + index: mlt_one_shard_test_index + id: "1" + body: { foo: bar } + + - do: + index: + index: mlt_one_shard_test_index + id: "2" + body: { foo: baz } + + - do: + index: + index: mlt_one_shard_test_index + id: "3" + body: { foo: foo } + + - do: + indices.refresh: {} + + - do: + get: + index: mlt_one_shard_test_index + id: "1" + + - match: { _source.foo: "bar" } + + - do: + get: + index: mlt_one_shard_test_index + id: "2" + + - match: { _source.foo: "baz" } + + - do: + get: + index: mlt_one_shard_test_index + id: "3" + + - match: { _source.foo: "foo" } + + - do: + search: + rest_total_hits_as_int: true + index: mlt_one_shard_test_index + body: + query: + more_like_this: + like: + - + _index: mlt_one_shard_test_index + doc: + foo: bar + - + _index: mlt_one_shard_test_index + _id: "2" + - + _id: "3" + include: true + min_doc_freq: 0 + min_term_freq: 0 + + - match: { hits.total: 3 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index d17ae1c7fce0d..4e1bc5b48efe0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -379,11 +379,10 @@ public void 
testSearchTaskDescriptions() { assertEquals(mainTask.get(0).taskId(), taskInfo.parentTaskId()); assertTaskHeaders(taskInfo); switch (taskInfo.action()) { - case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.QUERY_CAN_MATCH_NAME, - SearchTransportService.DFS_ACTION_NAME -> assertTrue( - taskInfo.description(), - Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) - ); + case SearchTransportService.QUERY_ACTION_NAME, SearchTransportService.DFS_ACTION_NAME -> assertTrue( + taskInfo.description(), + Regex.simpleMatch("shardId[[test][*]]", taskInfo.description()) + ); case SearchTransportService.QUERY_ID_ACTION_NAME -> assertTrue( taskInfo.description(), Regex.simpleMatch("id[*], indices[test]", taskInfo.description()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index 1a5f913e4bab2..30c57873fc6b1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -266,35 +266,6 @@ public void testFailingReposAreTreatedAsNonExistingShardSnapshots() throws Excep } } - public void testFetchingInformationFromAnIncompatibleMasterNodeReturnsAnEmptyList() { - String indexName = "test"; - createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build()); - ShardId shardId = getShardIdForIndex(indexName); - - for (int i = 0; i < randomIntBetween(1, 50); i++) { - index(indexName, Integer.toString(i), Collections.singletonMap("foo", "bar")); - } - - String snapshotName = "snap"; - String repositoryName = "repo"; - createRepository(repositoryName, "fs", randomRepoPath(), true); - createSnapshot(repositoryName, snapshotName, indexName); - - RepositoriesService repositoriesService = internalCluster().getAnyMasterNodeInstance(RepositoriesService.class); - ThreadPool threadPool = internalCluster().getAnyMasterNodeInstance(ThreadPool.class); - ClusterService clusterService = internalCluster().getAnyMasterNodeInstance(ClusterService.class); - ShardSnapshotsService shardSnapshotsService = new ShardSnapshotsService(client(), repositoriesService, threadPool, clusterService) { - @Override - protected boolean masterSupportsFetchingLatestSnapshots() { - return false; - } - }; - - PlainActionFuture<Optional<ShardSnapshot>> latestSnapshots = new PlainActionFuture<>(); - shardSnapshotsService.fetchLatestSnapshotsForShard(shardId, latestSnapshots); - assertThat(latestSnapshots.actionGet().isPresent(), is(equalTo(false))); - } - private Optional getLatestShardSnapshot(ShardId shardId) throws Exception { ShardSnapshotsService shardSnapshotsService = getShardSnapshotsService(); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 7f9328e2c08ab..613e6868b8e9f 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -422,5 +422,9 @@ org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; - exports org.elasticsearch.cluster.routing.allocation.shards to org.elasticsearch.shardhealth, org.elasticsearch.serverless.shardhealth; + exports org.elasticsearch.cluster.routing.allocation.shards + to + org.elasticsearch.shardhealth, + org.elasticsearch.serverless.shardhealth, +
org.elasticsearch.serverless.apifiltering; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 535192eeefd47..a1675b3e2b908 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -9,6 +9,7 @@ package org.elasticsearch; import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.util.Collection; @@ -47,6 +48,7 @@ static TransportVersion def(int id) { return new TransportVersion(id); } + @UpdateForV9 // remove the transport versions with which v9 will not need to interact public static final TransportVersion ZERO = def(0); public static final TransportVersion V_7_0_0 = def(7_00_00_99); public static final TransportVersion V_7_0_1 = def(7_00_01_99); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 800ad7afbb8db..96824844d4ba4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,8 +8,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -26,10 +24,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -58,12 +54,9 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; /** @@ -82,7 +75,6 @@ public class SearchTransportService { public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; - public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]"; public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; private final TransportService transportService; @@ -137,79 +129,20 @@ public void sendFreeContext( public void sendCanMatch( Transport.Connection connection, - final ShardSearchRequest request, + final CanMatchNodeRequest request, SearchTask task, - final ActionListener listener + final ActionListener listener ) { transportService.sendChildRequest( connection, - QUERY_CAN_MATCH_NAME, + QUERY_CAN_MATCH_NODE_NAME, request, task, TransportRequestOptions.EMPTY, - new 
ActionListenerResponseHandler<>(listener, CanMatchShardResponse::new, TransportResponseHandler.TRANSPORT_WORKER) + new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER) ); } - public void sendCanMatch( - Transport.Connection connection, - final CanMatchNodeRequest request, - SearchTask task, - final ActionListener listener - ) { - if (connection.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) - && connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { - transportService.sendChildRequest( - connection, - QUERY_CAN_MATCH_NODE_NAME, - request, - task, - TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new, TransportResponseHandler.TRANSPORT_WORKER) - ); - } else { - // BWC layer: translate into shard-level requests - final List shardSearchRequests = request.createShardSearchRequests(); - final AtomicReferenceArray results = new AtomicReferenceArray<>( - shardSearchRequests.size() - ); - final CountDown counter = new CountDown(shardSearchRequests.size()); - final Runnable maybeFinish = () -> { - if (counter.countDown()) { - final CanMatchNodeResponse.ResponseOrFailure[] responses = - new CanMatchNodeResponse.ResponseOrFailure[shardSearchRequests.size()]; - for (int i = 0; i < responses.length; i++) { - responses[i] = results.get(i); - } - final CanMatchNodeResponse response = new CanMatchNodeResponse(Arrays.asList(responses)); - listener.onResponse(response); - } - }; - for (int i = 0; i < shardSearchRequests.size(); i++) { - final ShardSearchRequest shardSearchRequest = shardSearchRequests.get(i); - final int finalI = i; - try { - sendCanMatch(connection, shardSearchRequest, task, new ActionListener<>() { - @Override - public void onResponse(CanMatchShardResponse response) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(response)); - maybeFinish.run(); - } - - @Override - public void onFailure(Exception e) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); - maybeFinish.run(); - } - }); - } catch (Exception e) { - results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); - maybeFinish.run(); - } - } - } - } - public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { transportService.sendRequest( connection, @@ -565,24 +498,11 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); - // this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread - transportService.registerRequestHandler( - QUERY_CAN_MATCH_NAME, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - ShardSearchRequest::new, - (request, channel, task) -> { - searchService.canMatch(request, new ChannelActionListener<>(channel)); - } - ); - TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, true, CanMatchShardResponse::new); - transportService.registerRequestHandler( QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - (request, channel, task) -> { - searchService.canMatch(request, new ChannelActionListener<>(channel)); - } + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, 
true, CanMatchNodeResponse::new); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index b50b1e0a74d93..0446b479b191d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.automaton.Automaton; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -63,7 +62,6 @@ public class IndexNameExpressionResolver { private static final Predicate ALWAYS_TRUE = s -> true; public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds"; - public static final Version SYSTEM_INDEX_ENFORCEMENT_VERSION = Version.V_8_0_0; public static final IndexVersion SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION = IndexVersions.V_8_0_0; private final ThreadContext threadContext; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index bd99003d3fe0c..64f1eb704a2f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -293,6 +293,10 @@ private void onNoLongerMaster() { queue.completeAllAsNotMaster(); pendingDesiredBalanceMoves.clear(); desiredBalanceReconciler.clear(); + + desiredBalanceReconciler.unassignedShards.set(0); + desiredBalanceReconciler.totalAllocations.set(0); + desiredBalanceReconciler.undesiredAllocations.set(0); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java index 76ca9f88b4b58..74da033fd8811 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java @@ -249,7 +249,7 @@ static void updateShardAllocationStatus( ); public static final String ENABLE_TIER_ACTION_GUIDE = "https://ela.st/enable-tier"; - public static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() + private static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( tier -> tier, @@ -276,7 +276,7 @@ static void updateShardAllocationStatus( INCREASE_SHARD_LIMIT_ACTION_GUIDE ); - public static final Map ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS + private static final Map ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS .stream() .collect( Collectors.toUnmodifiableMap( @@ -307,7 +307,7 @@ static void updateShardAllocationStatus( INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE ); - public static final Map ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS + private static final Map 
ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP = DataTier.ALL_DATA_TIERS .stream() .collect( Collectors.toUnmodifiableMap( @@ -405,6 +405,7 @@ static void updateShardAllocationStatus( TIER_CAPACITY_ACTION_GUIDE ); + // Visible for testing public static final Map ACTION_INCREASE_TIER_CAPACITY_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( @@ -622,11 +623,11 @@ List diagnoseAllocationResults( ClusterState state, List nodeAllocationResults ) { - IndexMetadata index = state.metadata().index(shardRouting.index()); + IndexMetadata indexMetadata = state.metadata().index(shardRouting.index()); List diagnosisDefs = new ArrayList<>(); - if (index != null) { - diagnosisDefs.addAll(checkIsAllocationDisabled(index, nodeAllocationResults)); - diagnosisDefs.addAll(checkDataTierRelatedIssues(index, nodeAllocationResults, state)); + if (indexMetadata != null) { + diagnosisDefs.addAll(checkIsAllocationDisabled(indexMetadata, nodeAllocationResults)); + diagnosisDefs.addAll(checkNodeRoleRelatedIssues(indexMetadata, nodeAllocationResults, state, shardRouting)); } if (diagnosisDefs.isEmpty()) { diagnosisDefs.add(ACTION_CHECK_ALLOCATION_EXPLAIN_API); @@ -640,7 +641,7 @@ List diagnoseAllocationResults( * @param outcome The outcome expected * @return A predicate that returns true if the decision exists and matches the expected outcome, false otherwise. */ - private static Predicate hasDeciderResult(String deciderName, Decision.Type outcome) { + protected static Predicate hasDeciderResult(String deciderName, Decision.Type outcome) { return (nodeResult) -> { Decision decision = nodeResult.getCanAllocateDecision(); return decision != null && decision.getDecisions().stream().anyMatch(d -> deciderName.equals(d.label()) && outcome == d.type()); @@ -676,26 +677,29 @@ List checkIsAllocationDisabled(IndexMetadata indexMetadata } /** - * Generates a list of diagnoses for common problems that keep a shard from allocating to nodes in a data tier. + * Generates a list of diagnoses for common problems that keep a shard from allocating to nodes depending on their role; + * a very common example of such a case is data tiers. * @param indexMetadata Index metadata for the shard being diagnosed. * @param nodeAllocationResults allocation decision results for all nodes in the cluster. * @param clusterState the current cluster state. + * @param shardRouting the shard the nodeAllocationResults refer to * @return A list of diagnoses for the provided unassigned shard */ - public List checkDataTierRelatedIssues( + protected List checkNodeRoleRelatedIssues( IndexMetadata indexMetadata, List nodeAllocationResults, - ClusterState clusterState + ClusterState clusterState, + ShardRouting shardRouting ) { List diagnosisDefs = new ArrayList<>(); - if (indexMetadata.getTierPreference().size() > 0) { + if (indexMetadata.getTierPreference().isEmpty() == false) { List dataTierAllocationResults = nodeAllocationResults.stream() .filter(hasDeciderResult(DATA_TIER_ALLOCATION_DECIDER_NAME, Decision.Type.YES)) .toList(); if (dataTierAllocationResults.isEmpty()) { // Shard must be allocated on specific tiers but no nodes were enabled for those tiers.
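+ // getAddNodesWithRoleAction is an overridable, @Nullable hook (defined further down): by default it resolves a tier via ACTION_ENABLE_TIERS_LOOKUP, so subclasses can map other node roles to their own diagnoses.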
for (String tier : indexMetadata.getTierPreference()) { - Optional.ofNullable(ACTION_ENABLE_TIERS_LOOKUP.get(tier)).ifPresent(diagnosisDefs::add); + Optional.ofNullable(getAddNodesWithRoleAction(tier)).ifPresent(diagnosisDefs::add); } } else { // Collect the nodes from the tiers this index is allowed on @@ -719,29 +723,29 @@ public List checkDataTierRelatedIssues( // Run checks for data tier specific problems diagnosisDefs.addAll( - checkDataTierAtShardLimit(indexMetadata, clusterState, dataTierAllocationResults, dataTierNodes, preferredTier) + checkNodesWithRoleAtShardLimit(indexMetadata, clusterState, dataTierAllocationResults, dataTierNodes, preferredTier) ); diagnosisDefs.addAll(checkDataTierShouldMigrate(indexMetadata, dataTierAllocationResults, preferredTier, dataTierNodes)); - checkNotEnoughNodesInDataTier(dataTierAllocationResults, preferredTier).ifPresent(diagnosisDefs::add); + checkNotEnoughNodesWithRole(dataTierAllocationResults, preferredTier).ifPresent(diagnosisDefs::add); } } return diagnosisDefs; } - private List checkDataTierAtShardLimit( + protected List checkNodesWithRoleAtShardLimit( IndexMetadata indexMetadata, ClusterState clusterState, - List dataTierAllocationResults, - Set dataTierNodes, - @Nullable String preferredTier + List nodeRoleAllocationResults, + Set nodesWithRoles, + @Nullable String role ) { - // All tier nodes at shards limit? - if (dataTierAllocationResults.stream().allMatch(hasDeciderResult(ShardsLimitAllocationDecider.NAME, Decision.Type.NO))) { + // All applicable nodes at shards limit? + if (nodeRoleAllocationResults.stream().allMatch(hasDeciderResult(ShardsLimitAllocationDecider.NAME, Decision.Type.NO))) { List diagnosisDefs = new ArrayList<>(); - // We need the routing nodes for the tiers this index is allowed on to determine the offending shard limits - List dataTierRoutingNodes = clusterState.getRoutingNodes() + // We need the routing nodes for the role this index is allowed on to determine the offending shard limits + List candidateNodes = clusterState.getRoutingNodes() .stream() - .filter(routingNode -> dataTierNodes.contains(routingNode.node())) + .filter(routingNode -> nodesWithRoles.contains(routingNode.node())) .toList(); // Determine which total_shards_per_node settings are present @@ -752,34 +756,29 @@ private List checkDataTierAtShardLimit( // Determine which total_shards_per_node settings are keeping things from allocating boolean clusterShardsPerNodeShouldChange = false; if (clusterShardsPerNode > 0) { - int minShardCountInTier = dataTierRoutingNodes.stream() - .map(RoutingNode::numberOfOwningShards) - .min(Integer::compareTo) - .orElse(-1); - clusterShardsPerNodeShouldChange = minShardCountInTier >= clusterShardsPerNode; + int minShardCount = candidateNodes.stream().map(RoutingNode::numberOfOwningShards).min(Integer::compareTo).orElse(-1); + clusterShardsPerNodeShouldChange = minShardCount >= clusterShardsPerNode; } boolean indexShardsPerNodeShouldChange = false; if (indexShardsPerNode > 0) { - int minShardCountInTier = dataTierRoutingNodes.stream() + int minShardCount = candidateNodes.stream() .map(routingNode -> routingNode.numberOfOwningShardsForIndex(indexMetadata.getIndex())) .min(Integer::compareTo) .orElse(-1); - indexShardsPerNodeShouldChange = minShardCountInTier >= indexShardsPerNode; + indexShardsPerNodeShouldChange = minShardCount >= indexShardsPerNode; } // Add appropriate diagnosis - if (preferredTier != null) { - // We cannot allocate the shard to the most preferred tier because a shard limit is reached. 
+ if (role != null) { + // We cannot allocate the shard to the most preferred role because a shard limit is reached. if (clusterShardsPerNodeShouldChange) { - Optional.ofNullable(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(preferredTier)) - .ifPresent(diagnosisDefs::add); + Optional.ofNullable(getIncreaseShardLimitClusterSettingAction(role)).ifPresent(diagnosisDefs::add); } if (indexShardsPerNodeShouldChange) { - Optional.ofNullable(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(preferredTier)).ifPresent(diagnosisDefs::add); + Optional.ofNullable(getIncreaseShardLimitIndexSettingAction(role)).ifPresent(diagnosisDefs::add); } } else { - // We couldn't determine a desired tier. This is likely because there are no tiers in the cluster, - // only `data` nodes. Give a generic ask for increasing the shard limit. + // We couldn't determine a desired role. Give a generic ask for increasing the shard limit. if (clusterShardsPerNodeShouldChange) { diagnosisDefs.add(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING); } @@ -838,16 +837,16 @@ private static List checkDataTierShouldMigrate( } } - private static Optional checkNotEnoughNodesInDataTier( - List dataTierAllocationResults, - @Nullable String preferredTier + protected Optional checkNotEnoughNodesWithRole( + List nodeAllocationResults, + @Nullable String role ) { - // Not enough tier nodes to hold shards on different nodes? - if (dataTierAllocationResults.stream().allMatch(hasDeciderResult(SameShardAllocationDecider.NAME, Decision.Type.NO))) { - // We couldn't determine a desired tier. This is likely because there are no tiers in the cluster, - // only `data` nodes. Give a generic ask for increasing the shard limit. - if (preferredTier != null) { - return Optional.ofNullable(ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(preferredTier)); + // Not enough nodes to hold shards on different nodes? + if (nodeAllocationResults.stream().allMatch(hasDeciderResult(SameShardAllocationDecider.NAME, Decision.Type.NO))) { + // We couldn't determine a desired role. This is likely because there are no nodes with the relevant role in the cluster. + // Give a generic ask for increasing the shard limit. 
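+ // When a role was resolved, prefer its role-specific capacity diagnosis; otherwise fall back to the generic node-capacity action.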
+ if (role != null) { + return Optional.ofNullable(getIncreaseNodeWithRoleCapacityAction(role)); } else { return Optional.of(ACTION_INCREASE_NODE_CAPACITY); } @@ -856,6 +855,26 @@ private static Optional checkNotEnoughNodesInDataTier( } } + @Nullable + public Diagnosis.Definition getAddNodesWithRoleAction(String role) { + return ACTION_ENABLE_TIERS_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseShardLimitIndexSettingAction(String role) { + return ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseShardLimitClusterSettingAction(String role) { + return ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(role); + } + + @Nullable + public Diagnosis.Definition getIncreaseNodeWithRoleCapacityAction(String role) { + return ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(role); + } + public class ShardAllocationStatus { protected final ShardAllocationCounts primaries = new ShardAllocationCounts(); protected final ShardAllocationCounts replicas = new ShardAllocationCounts(); diff --git a/server/src/main/java/org/elasticsearch/features/FeatureData.java b/server/src/main/java/org/elasticsearch/features/FeatureData.java new file mode 100644 index 0000000000000..273617205ee47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/features/FeatureData.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.features; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +import static org.elasticsearch.features.FeatureService.CLUSTER_FEATURES_ADDED_VERSION; + +/** + * Reads and consolidates the features exposed by a list of {@link FeatureSpecification}s, grouping them into historical features and + * node features for consumption by {@link FeatureService}. + */ +public class FeatureData { + private final NavigableMap<Version, Set<String>> historicalFeatures; + private final Map<String, NodeFeature> nodeFeatures; + + private FeatureData(NavigableMap<Version, Set<String>> historicalFeatures, Map<String, NodeFeature> nodeFeatures) { + this.historicalFeatures = historicalFeatures; + this.nodeFeatures = nodeFeatures; + } + + public static FeatureData createFromSpecifications(List specs) { + Map<String, FeatureSpecification> allFeatures = new HashMap<>(); + + NavigableMap<Version, Set<String>> historicalFeatures = new TreeMap<>(); + Map<String, NodeFeature> nodeFeatures = new HashMap<>(); + for (FeatureSpecification spec : specs) { + for (var hfe : spec.getHistoricalFeatures().entrySet()) { + FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); + // the same SPI class can be loaded multiple times if it's in the base classloader + if (existing != null && existing.getClass() != spec.getClass()) { + throw new IllegalArgumentException( + Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", hfe.getKey().id(), existing, spec) + ); + } + + if (hfe.getValue().after(CLUSTER_FEATURES_ADDED_VERSION)) { + throw new IllegalArgumentException( + Strings.format( + "Historical feature [%s] declared by [%s] for version [%s] is not a historical
version", + hfe.getKey().id(), + spec, + hfe.getValue() + ) + ); + } + + historicalFeatures.computeIfAbsent(hfe.getValue(), k -> new HashSet<>()).add(hfe.getKey().id()); + } + + for (NodeFeature f : spec.getFeatures()) { + FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); + if (existing != null && existing.getClass() != spec.getClass()) { + throw new IllegalArgumentException( + Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", f.id(), existing, spec) + ); + } + + nodeFeatures.put(f.id(), f); + } + } + + return new FeatureData(consolidateHistoricalFeatures(historicalFeatures), Map.copyOf(nodeFeatures)); + } + + private static NavigableMap> consolidateHistoricalFeatures( + NavigableMap> declaredHistoricalFeatures + ) { + // update each version by adding in all features from previous versions + Set featureAggregator = new HashSet<>(); + for (Map.Entry> versions : declaredHistoricalFeatures.entrySet()) { + featureAggregator.addAll(versions.getValue()); + versions.setValue(Set.copyOf(featureAggregator)); + } + + return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); + } + + public NavigableMap> getHistoricalFeatures() { + return historicalFeatures; + } + + public Map getNodeFeatures() { + return nodeFeatures; + } +} diff --git a/server/src/main/java/org/elasticsearch/features/FeatureService.java b/server/src/main/java/org/elasticsearch/features/FeatureService.java index d88589ac1ede8..1d60627656b9e 100644 --- a/server/src/main/java/org/elasticsearch/features/FeatureService.java +++ b/server/src/main/java/org/elasticsearch/features/FeatureService.java @@ -10,19 +10,14 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Set; -import java.util.TreeMap; /** * Manages information on the features supported by nodes in the cluster @@ -42,65 +37,14 @@ public class FeatureService { private final Map nodeFeatures; public FeatureService(List specs) { - Map allFeatures = new HashMap<>(); - NavigableMap> historicalFeatures = new TreeMap<>(); - Map nodeFeatures = new HashMap<>(); - for (FeatureSpecification spec : specs) { - for (var hfe : spec.getHistoricalFeatures().entrySet()) { - FeatureSpecification existing = allFeatures.putIfAbsent(hfe.getKey().id(), spec); - // the same SPI class can be loaded multiple times if it's in the base classloader - if (existing != null && existing.getClass() != spec.getClass()) { - throw new IllegalArgumentException( - Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", hfe.getKey().id(), existing, spec) - ); - } - - if (hfe.getValue().onOrAfter(CLUSTER_FEATURES_ADDED_VERSION)) { - throw new IllegalArgumentException( - Strings.format( - "Historical feature [%s] declared by [%s] for version [%s] is not a historical version", - hfe.getKey().id(), - spec, - hfe.getValue() - ) - ); - } - - historicalFeatures.computeIfAbsent(hfe.getValue(), k -> new HashSet<>()).add(hfe.getKey().id()); - } - - for (NodeFeature f : spec.getFeatures()) { - FeatureSpecification existing = allFeatures.putIfAbsent(f.id(), spec); - if (existing != null && existing.getClass() != spec.getClass()) { - throw new 
IllegalArgumentException( - Strings.format("Duplicate feature - [%s] is declared by both [%s] and [%s]", f.id(), existing, spec) - ); - } - - nodeFeatures.put(f.id(), f); - } - } - - this.historicalFeatures = consolidateHistoricalFeatures(historicalFeatures); - this.nodeFeatures = Map.copyOf(nodeFeatures); + var featureData = FeatureData.createFromSpecifications(specs); + nodeFeatures = featureData.getNodeFeatures(); + historicalFeatures = featureData.getHistoricalFeatures(); logger.info("Registered local node features {}", nodeFeatures.keySet().stream().sorted().toList()); } - private static NavigableMap<Version, Set<String>> consolidateHistoricalFeatures( - NavigableMap<Version, Set<String>> declaredHistoricalFeatures - ) { - // update each version by adding in all features from previous versions - Set<String> featureAggregator = new HashSet<>(); - for (Map.Entry<Version, Set<String>> versions : declaredHistoricalFeatures.entrySet()) { - featureAggregator.addAll(versions.getValue()); - versions.setValue(Set.copyOf(featureAggregator)); - } - - return Collections.unmodifiableNavigableMap(declaredHistoricalFeatures); - } - /** * The non-historical features supported by this node. */ diff --git a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java index 0e4722a872c4e..c1efa58a50c86 100644 --- a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java @@ -67,6 +67,12 @@ public Response(final ClusterName clusterName, final List } } + public Response(final ClusterName clusterName, final List indicators, HealthStatus topLevelStatus) { + this.indicators = indicators; + this.clusterName = clusterName; + this.status = topLevelStatus; + } + public ClusterName getClusterName() { return clusterName; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6327c2ba53f54..b6bebcf6abb12 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.core.Assertions; +import org.elasticsearch.core.UpdateForV9; import java.lang.reflect.Field; import java.util.Collection; @@ -44,6 +45,7 @@ private static IndexVersion def(int id, Version luceneVersion) { return new IndexVersion(id, luceneVersion); } + @UpdateForV9 // remove the index versions with which v9 will not need to interact public static final IndexVersion ZERO = def(0, Version.LATEST); public static final IndexVersion V_7_0_0 = def(7_00_00_99, Version.LUCENE_8_0_0); @@ -87,6 +89,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion NEW_SPARSE_VECTOR = def(8_500_001, Version.LUCENE_9_7_0); public static final IndexVersion SPARSE_VECTOR_IN_FIELD_NAMES_SUPPORT = def(8_500_002, Version.LUCENE_9_7_0); public static final IndexVersion UPGRADE_LUCENE_9_8 = def(8_500_003, Version.LUCENE_9_8_0); + public static final IndexVersion ES_VERSION_8_12 = def(8_500_004, Version.LUCENE_9_8_0); /* * STOP! READ THIS FIRST!
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index 2e894ea304fdc..340af1c1f7347 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -10,16 +10,18 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.OrdinalMap; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.packed.PackedInts; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; import org.elasticsearch.index.fielddata.plain.AbstractLeafOrdinalsFieldData; -import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.field.ToScriptFieldFactory; import java.io.IOException; @@ -37,7 +39,7 @@ public enum GlobalOrdinalsBuilder { public static IndexOrdinalsFieldData build( final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData, - CircuitBreakerService breakerService, + CircuitBreaker breaker, Logger logger, ToScriptFieldFactory toScriptFieldFactory ) throws IOException { @@ -50,9 +52,26 @@ public static IndexOrdinalsFieldData build( atomicFD[i] = indexFieldData.load(indexReader.leaves().get(i)); subs[i] = atomicFD[i].getOrdinalsValues(); } - final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT); + final TermsEnum[] termsEnums = new TermsEnum[subs.length]; + final long[] weights = new long[subs.length]; + // we assume that TermsEnum are visited sequentially, so we can share the counter between them + final long[] counter = new long[1]; + for (int i = 0; i < subs.length; ++i) { + termsEnums[i] = new FilterLeafReader.FilterTermsEnum(subs[i].termsEnum()) { + @Override + public BytesRef next() throws IOException { + // check parent circuit breaker every 65536 calls + if ((counter[0]++ & 0xFFFF) == 0) { + breaker.addEstimateBytesAndMaybeBreak(0L, "Global Ordinals"); + } + return in.next(); + } + }; + weights[i] = subs[i].getValueCount(); + } + final OrdinalMap ordinalMap = OrdinalMap.build(null, termsEnums, weights, PackedInts.DEFAULT); final long memorySizeInBytes = ordinalMap.ramBytesUsed(); - breakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(memorySizeInBytes); + breaker.addWithoutBreaking(memorySizeInBytes); TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS); if (logger.isDebugEnabled()) { @@ -108,5 +127,4 @@ public void close() {} took ); } - } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index 610f4a19f1a52..6b30a27def441 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -17,6 +17,7 @@ import org.apache.lucene.index.TermsEnum; import 
org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.LeafOrdinalsFieldData; @@ -136,7 +137,13 @@ private IndexOrdinalsFieldData loadGlobalInternal(DirectoryReader indexReader) { @Override public IndexOrdinalsFieldData loadGlobalDirect(DirectoryReader indexReader) throws Exception { - return GlobalOrdinalsBuilder.build(indexReader, this, breakerService, logger, toScriptFieldFactory); + return GlobalOrdinalsBuilder.build( + indexReader, + this, + breakerService.getBreaker(CircuitBreaker.FIELDDATA), + logger, + toScriptFieldFactory + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java index 6e572eceeafc4..e52e3177c6a54 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java @@ -95,12 +95,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { @Override public AllReader reader(LeafReaderContext context) throws IOException { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonLongs(singleton); + } + return new Longs(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); if (singleton != null) { return new SingletonLongs(singleton); } - return new Longs(docValues); + return new ConstantNullsReader(); } } @@ -223,12 +230,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { @Override public AllReader reader(LeafReaderContext context) throws IOException { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonInts(singleton); + } + return new Ints(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); if (singleton != null) { return new SingletonInts(singleton); } - return new Ints(docValues); + return new ConstantNullsReader(); } } @@ -362,12 +376,19 @@ public Builder builder(BlockFactory factory, int expectedCount) { @Override public AllReader reader(LeafReaderContext context) throws IOException { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonDoubles(singleton, toDouble); + } + return new Doubles(docValues, toDouble); + } + NumericDocValues singleton = 
context.reader().getNumericDocValues(fieldName); if (singleton != null) { return new SingletonDoubles(singleton, toDouble); } - return new Doubles(docValues, toDouble); + return new ConstantNullsReader(); } } @@ -496,12 +517,19 @@ public BytesRefBuilder builder(BlockFactory factory, int expectedCount) { @Override public AllReader reader(LeafReaderContext context) throws IOException { - SortedSetDocValues docValues = ordinals(context); - SortedDocValues singleton = DocValues.unwrapSingleton(docValues); + SortedSetDocValues docValues = context.reader().getSortedSetDocValues(fieldName); + if (docValues != null) { + SortedDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonOrdinals(singleton); + } + return new Ordinals(docValues); + } + SortedDocValues singleton = context.reader().getSortedDocValues(fieldName); if (singleton != null) { return new SingletonOrdinals(singleton); } - return new Ordinals(docValues); + return new ConstantNullsReader(); } @Override @@ -719,12 +747,19 @@ public BooleanBuilder builder(BlockFactory factory, int expectedCount) { @Override public AllReader reader(LeafReaderContext context) throws IOException { - SortedNumericDocValues docValues = DocValues.getSortedNumeric(context.reader(), fieldName); - NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + SortedNumericDocValues docValues = context.reader().getSortedNumericDocValues(fieldName); + if (docValues != null) { + NumericDocValues singleton = DocValues.unwrapSingleton(docValues); + if (singleton != null) { + return new SingletonBooleans(singleton); + } + return new Booleans(docValues); + } + NumericDocValues singleton = context.reader().getNumericDocValues(fieldName); if (singleton != null) { return new SingletonBooleans(singleton); } - return new Booleans(docValues); + return new ConstantNullsReader(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java index 8b1b794f1df55..0090935f51bc3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockLoaderStoredFieldsFromLeafLoader.java @@ -17,18 +17,18 @@ public class BlockLoaderStoredFieldsFromLeafLoader implements BlockLoader.StoredFields { private final LeafStoredFieldLoader loader; - private final boolean loadSource; + private final SourceLoader.Leaf sourceLoader; private Source source; - public BlockLoaderStoredFieldsFromLeafLoader(LeafStoredFieldLoader loader, boolean loadSource) { + public BlockLoaderStoredFieldsFromLeafLoader(LeafStoredFieldLoader loader, SourceLoader.Leaf sourceLoader) { this.loader = loader; - this.loadSource = loadSource; + this.sourceLoader = sourceLoader; } public void advanceTo(int doc) throws IOException { loader.advanceTo(doc); - if (loadSource) { - source = Source.fromBytes(loader.source()); + if (sourceLoader != null) { + source = sourceLoader.source(loader, doc); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 289b28949cdab..12b5ff0e82a03 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -35,7 +35,7 @@ public abstract class BlockSourceReader 
implements BlockLoader.RowStrideReader { public final void read(int docId, BlockLoader.StoredFields storedFields, BlockLoader.Builder builder) throws IOException { List values = fetcher.fetchValues(storedFields.source(), docId, ignoredValues); ignoredValues.clear(); // TODO do something with these? - if (values == null) { + if (values == null || values.isEmpty()) { builder.appendNull(); return; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java index 043ca38b1c78b..0a6cde773ff48 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockStoredFieldsReader.java @@ -42,7 +42,7 @@ private abstract static class StoredFieldsBlockLoader implements BlockLoader { } @Override - public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) throws IOException { + public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { return null; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java new file mode 100644 index 0000000000000..63455379044f7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldBlockLoader.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.elasticsearch.search.fetch.StoredFieldsSpec; + +import java.io.IOException; +import java.util.Set; + +/** + * Load {@code _source} into blocks. 
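+ * Values are read row-stride from the stored {@code _source} field; this loader provides no column-at-a-time reader.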
+ */ +public final class SourceFieldBlockLoader implements BlockLoader { + @Override + public Builder builder(BlockFactory factory, int expectedCount) { + return factory.bytesRefs(expectedCount); + } + + @Override + public ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) { + return null; + } + + @Override + public RowStrideReader rowStrideReader(LeafReaderContext context) throws IOException { + return new Source(); + } + + @Override + public StoredFieldsSpec rowStrideStoredFieldSpec() { + return new StoredFieldsSpec(true, false, Set.of()); + } + + @Override + public boolean supportsOrdinals() { + return false; + } + + @Override + public SortedSetDocValues ordinals(LeafReaderContext context) { + throw new UnsupportedOperationException(); + } + + private static class Source extends BlockStoredFieldsReader { + @Override + public void read(int docId, StoredFields storedFields, Builder builder) throws IOException { + // TODO support appending BytesReference + ((BytesRefBuilder) builder).appendBytesRef(storedFields.source().internalSourceRef().toBytesRef()); + } + + @Override + public String toString() { + return "BlockStoredFieldsReader.Source"; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index 42121147d7f09..958db80ae64c2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -192,9 +192,11 @@ private IndexMode getIndexMode() { ); static final class SourceFieldType extends MappedFieldType { + private final boolean enabled; private SourceFieldType(boolean enabled) { super(NAME, false, enabled, false, TextSearchInfo.NONE, Collections.emptyMap()); + this.enabled = enabled; } @Override @@ -216,6 +218,14 @@ public Query existsQuery(SearchExecutionContext context) { public Query termQuery(Object value, SearchExecutionContext context) { throw new QueryShardException(context, "The _source field is not searchable"); } + + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + if (enabled) { + return new SourceFieldBlockLoader(); + } + return BlockLoader.CONSTANT_NULLS; + } } // nullable for bwc reasons diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 305494697216f..0faa66a9d21da 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -138,7 +138,6 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -272,61 +271,35 @@ protected void doStart() { } @SuppressWarnings("this-escape") - public IndicesService( - Settings settings, - PluginsService pluginsService, - NodeEnvironment nodeEnv, - NamedXContentRegistry xContentRegistry, - AnalysisRegistry analysisRegistry, - IndexNameExpressionResolver indexNameExpressionResolver, - MapperRegistry mapperRegistry, - NamedWriteableRegistry namedWriteableRegistry, - ThreadPool threadPool, - IndexScopedSettings indexScopedSettings, - CircuitBreakerService 
circuitBreakerService, - BigArrays bigArrays, - ScriptService scriptService, - ClusterService clusterService, - Client client, - FeatureService featureService, - MetaStateService metaStateService, - Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders, - Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories, - ValuesSourceRegistry valuesSourceRegistry, - Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories, - List<IndexStorePlugin.IndexFoldersDeletionListener> indexFoldersDeletionListeners, - Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers, - CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator, - Supplier<DocumentParsingObserver> documentParsingObserverSupplier - ) { - this.settings = settings; - this.threadPool = threadPool; - this.pluginsService = pluginsService; - this.nodeEnv = nodeEnv; + IndicesService(IndicesServiceBuilder builder) { + this.settings = builder.settings; + this.threadPool = builder.threadPool; + this.pluginsService = builder.pluginsService; + this.nodeEnv = builder.nodeEnv; this.parserConfig = XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE) - .withRegistry(xContentRegistry); - this.valuesSourceRegistry = valuesSourceRegistry; + .withRegistry(builder.xContentRegistry); + this.valuesSourceRegistry = builder.valuesSourceRegistry; this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); - this.analysisRegistry = analysisRegistry; - this.indexNameExpressionResolver = indexNameExpressionResolver; + this.analysisRegistry = builder.analysisRegistry; + this.indexNameExpressionResolver = builder.indexNameExpressionResolver; this.indicesRequestCache = new IndicesRequestCache(settings); this.indicesQueryCache = new IndicesQueryCache(settings); - this.mapperRegistry = mapperRegistry; - this.namedWriteableRegistry = namedWriteableRegistry; - this.documentParsingObserverSupplier = documentParsingObserverSupplier; + this.mapperRegistry = builder.mapperRegistry; + this.namedWriteableRegistry = builder.namedWriteableRegistry; + this.documentParsingObserverSupplier = builder.documentParsingObserverSupplier; indexingMemoryController = new IndexingMemoryController( settings, threadPool, // ensure we pull an iter with new shards - flatten makes a copy () -> Iterables.flatten(this).iterator() ); - this.indexScopedSettings = indexScopedSettings; - this.circuitBreakerService = circuitBreakerService; - this.bigArrays = bigArrays; - this.scriptService = scriptService; - this.clusterService = clusterService; - this.client = client; - this.featureService = featureService; + this.indexScopedSettings = builder.indexScopedSettings; + this.circuitBreakerService = builder.circuitBreakerService; + this.bigArrays = builder.bigArrays; + this.scriptService = builder.scriptService; + this.clusterService = builder.clusterService; + this.client = builder.client; + this.featureService = builder.featureService; this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled); this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { @@ -342,21 +315,21 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon }); this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, threadPool, this.cleanInterval); - this.metaStateService = metaStateService; - this.engineFactoryProviders = engineFactoryProviders; + this.metaStateService = builder.metaStateService; +
this.engineFactoryProviders = builder.engineFactoryProviders; // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : directoryFactories.keySet()) { + for (final String indexStoreType : builder.directoryFactories.keySet()) { if (IndexModule.isBuiltinType(indexStoreType)) { throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); } } - this.directoryFactories = directoryFactories; - this.recoveryStateFactories = recoveryStateFactories; - this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(indexFoldersDeletionListeners); - this.snapshotCommitSuppliers = snapshotCommitSuppliers; - this.requestCacheKeyDifferentiator = requestCacheKeyDifferentiator; + this.directoryFactories = builder.directoryFactories; + this.recoveryStateFactories = builder.recoveryStateFactories; + this.indexFoldersDeletionListeners = new CompositeIndexFoldersDeletionListener(builder.indexFoldersDeletionListeners); + this.snapshotCommitSuppliers = builder.snapshotCommitSuppliers; + this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java new file mode 100644 index 0000000000000..a5cd00bb86094 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/IndicesServiceBuilder.java @@ -0,0 +1,232 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.gateway.MetaStateService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.mapper.MapperRegistry; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.internal.DocumentParsingObserver; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class IndicesServiceBuilder { + Settings settings; + PluginsService pluginsService; + NodeEnvironment nodeEnv; + NamedXContentRegistry xContentRegistry; + AnalysisRegistry analysisRegistry; + IndexNameExpressionResolver indexNameExpressionResolver; + MapperRegistry mapperRegistry; + NamedWriteableRegistry namedWriteableRegistry; + ThreadPool threadPool; + IndexScopedSettings indexScopedSettings; + CircuitBreakerService circuitBreakerService; + BigArrays bigArrays; + ScriptService scriptService; + ClusterService clusterService; + Client client; + FeatureService featureService; + MetaStateService metaStateService; + Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders = List.of(); + Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories = Map.of(); + @Nullable + ValuesSourceRegistry valuesSourceRegistry; + Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories = Map.of(); + List<IndexStorePlugin.IndexFoldersDeletionListener> indexFoldersDeletionListeners = List.of(); + Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers = Map.of(); + @Nullable + CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator; + Supplier<DocumentParsingObserver> documentParsingObserverSupplier; + + public IndicesServiceBuilder settings(Settings settings) { + this.settings = settings; + return this; + } + + public IndicesServiceBuilder pluginsService(PluginsService pluginsService) { + this.pluginsService = pluginsService; + return this; + } + + public IndicesServiceBuilder nodeEnvironment(NodeEnvironment nodeEnv) { + this.nodeEnv = nodeEnv; + return this; + } + + public IndicesServiceBuilder xContentRegistry(NamedXContentRegistry xContentRegistry) { + this.xContentRegistry = xContentRegistry; + return this; + } + + public IndicesServiceBuilder analysisRegistry(AnalysisRegistry analysisRegistry) { + this.analysisRegistry = analysisRegistry; + return this; + } + + public IndicesServiceBuilder
indexNameExpressionResolver(IndexNameExpressionResolver indexNameExpressionResolver) { + this.indexNameExpressionResolver = indexNameExpressionResolver; + return this; + } + + public IndicesServiceBuilder mapperRegistry(MapperRegistry mapperRegistry) { + this.mapperRegistry = mapperRegistry; + return this; + } + + public IndicesServiceBuilder namedWriteableRegistry(NamedWriteableRegistry namedWriteableRegistry) { + this.namedWriteableRegistry = namedWriteableRegistry; + return this; + } + + public IndicesServiceBuilder threadPool(ThreadPool threadPool) { + this.threadPool = threadPool; + return this; + } + + public IndicesServiceBuilder indexScopedSettings(IndexScopedSettings indexScopedSettings) { + this.indexScopedSettings = indexScopedSettings; + return this; + } + + public IndicesServiceBuilder circuitBreakerService(CircuitBreakerService circuitBreakerService) { + this.circuitBreakerService = circuitBreakerService; + return this; + } + + public IndicesServiceBuilder bigArrays(BigArrays bigArrays) { + this.bigArrays = bigArrays; + return this; + } + + public IndicesServiceBuilder scriptService(ScriptService scriptService) { + this.scriptService = scriptService; + return this; + } + + public IndicesServiceBuilder clusterService(ClusterService clusterService) { + this.clusterService = clusterService; + return this; + } + + public IndicesServiceBuilder client(Client client) { + this.client = client; + return this; + } + + public IndicesServiceBuilder featureService(FeatureService featureService) { + this.featureService = featureService; + return this; + } + + public IndicesServiceBuilder metaStateService(MetaStateService metaStateService) { + this.metaStateService = metaStateService; + return this; + } + + public IndicesServiceBuilder valuesSourceRegistry(ValuesSourceRegistry valuesSourceRegistry) { + this.valuesSourceRegistry = valuesSourceRegistry; + return this; + } + + public IndicesServiceBuilder requestCacheKeyDifferentiator( + CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator + ) { + this.requestCacheKeyDifferentiator = requestCacheKeyDifferentiator; + return this; + } + + public IndicesServiceBuilder documentParsingObserverSupplier(Supplier<DocumentParsingObserver> documentParsingObserverSupplier) { + this.documentParsingObserverSupplier = documentParsingObserverSupplier; + return this; + } + + public IndicesService build() { + Objects.requireNonNull(settings); + Objects.requireNonNull(pluginsService); + Objects.requireNonNull(nodeEnv); + Objects.requireNonNull(xContentRegistry); + Objects.requireNonNull(analysisRegistry); + Objects.requireNonNull(indexNameExpressionResolver); + Objects.requireNonNull(mapperRegistry); + Objects.requireNonNull(namedWriteableRegistry); + Objects.requireNonNull(threadPool); + Objects.requireNonNull(indexScopedSettings); + Objects.requireNonNull(circuitBreakerService); + Objects.requireNonNull(bigArrays); + Objects.requireNonNull(scriptService); + Objects.requireNonNull(clusterService); + Objects.requireNonNull(client); + Objects.requireNonNull(featureService); + Objects.requireNonNull(metaStateService); + Objects.requireNonNull(engineFactoryProviders); + Objects.requireNonNull(directoryFactories); + Objects.requireNonNull(recoveryStateFactories); + Objects.requireNonNull(indexFoldersDeletionListeners); + Objects.requireNonNull(snapshotCommitSuppliers); + Objects.requireNonNull(documentParsingObserverSupplier); + + // collect engine factory providers from plugins + engineFactoryProviders = pluginsService.filterPlugins(EnginePlugin.class) + .<Function<IndexSettings, Optional<EngineFactory>>>map(plugin ->
plugin::getEngineFactory) + .toList(); + + directoryFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getDirectoryFactories) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + recoveryStateFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getRecoveryStateFactories) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + indexFoldersDeletionListeners = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getIndexFoldersDeletionListeners) + .flatMap(List::stream) + .toList(); + + snapshotCommitSuppliers = pluginsService.filterPlugins(IndexStorePlugin.class) + .map(IndexStorePlugin::getSnapshotCommitSuppliers) + .flatMap(m -> m.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + return new IndicesService(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 86b6013895263..709b990ecfdb8 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -501,7 +501,7 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) { HierarchyCircuitBreakerService::realMemoryUsage, createYoungGcCountSupplier(), System::currentTimeMillis, - 5000, + 500, lockTimeout ); } else { @@ -542,6 +542,8 @@ static class G1OverLimitStrategy implements OverLimitStrategy { private long blackHole; private final ReleasableLock lock = new ReleasableLock(new ReentrantLock()); + // used to throttle logging + private int attemptNo; G1OverLimitStrategy( JvmInfo jvmInfo, @@ -588,9 +590,12 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { boolean leader = false; int allocationIndex = 0; long allocationDuration = 0; + long begin = 0; + int attemptNoCopy = 0; try (ReleasableLock locked = lock.tryAcquire(lockTimeout)) { if (locked != null) { - long begin = timeSupplier.getAsLong(); + attemptNoCopy = ++this.attemptNo; + begin = timeSupplier.getAsLong(); leader = begin >= lastCheckTime + minimumInterval; overLimitTriggered(leader); if (leader) { @@ -622,9 +627,11 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { long now = timeSupplier.getAsLong(); this.lastCheckTime = now; allocationDuration = now - begin; + this.attemptNo = 0; } } } catch (InterruptedException e) { + logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage"); Thread.currentThread().interrupt(); // fallthrough } @@ -639,6 +646,13 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { allocationIndex, allocationDuration ); + } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { + logger.info( + "memory usage down after [{}], before [{}], after [{}]", + begin - lastCheckTime, + memoryUsed.baseUsage, + current + ); } return new MemoryUsage( current, @@ -655,6 +669,13 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { allocationIndex, allocationDuration ); + } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { + logger.info( + "memory usage not down after [{}], before [{}], after [{}]", + begin - lastCheckTime, + memoryUsed.baseUsage, + current + ); } // prefer original measurement when reporting 
if heap usage was not brought down. return memoryUsed; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 47405e0daa0a7..2e10a5de2d4e1 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -14,7 +14,6 @@ import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.settings.ClusterSettings; @@ -47,7 +46,6 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; public class RecoverySettings { - public static final Version SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = Version.V_7_15_0; public static final IndexVersion SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION = IndexVersions.V_7_15_0; public static final TransportVersion SNAPSHOT_RECOVERIES_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_15_0; public static final IndexVersion SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = IndexVersions.V_7_16_0; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java index 07d62fb87fe55..e15ec4c339a94 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java @@ -51,7 +51,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.indices.recovery.RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_VERSION; public class ShardSnapshotsService { private static final Logger logger = LogManager.getLogger(ShardSnapshotsService.class); @@ -84,13 +83,8 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener fetchSnapshotFiles(ShardId shardId, GetShardSnap } } - protected boolean masterSupportsFetchingLatestSnapshots() { - return clusterService.state().nodes().getMinNodeVersion().onOrAfter(SNAPSHOT_RECOVERIES_SUPPORTED_VERSION); - } - private static final class StoreFileMetadataDirectory extends Directory { private final Map files; diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 0dba888b91436..18f40af790f15 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -102,13 +102,12 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.indices.ExecutorSelector; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesServiceBuilder; import org.elasticsearch.indices.ShardLimitValidator; import 
org.elasticsearch.indices.SystemIndexMappingUpdateService; import org.elasticsearch.indices.SystemIndices; @@ -140,9 +139,7 @@ import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.DiscoveryPlugin; -import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.HealthPlugin; -import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.plugins.InferenceServicePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; @@ -693,34 +690,6 @@ private void construct( compatibilityVersions ); - // collect engine factory providers from plugins - final Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders = pluginsService.filterPlugins( - EnginePlugin.class - ).<Function<IndexSettings, Optional<EngineFactory>>>map(plugin -> plugin::getEngineFactory).toList(); - - final Map<String, IndexStorePlugin.DirectoryFactory> indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) - .map(IndexStorePlugin::getDirectoryFactories) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories = pluginsService.filterPlugins( - IndexStorePlugin.class - ) - .map(IndexStorePlugin::getRecoveryStateFactories) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final List<IndexStorePlugin.IndexFoldersDeletionListener> indexFoldersDeletionListeners = pluginsService.filterPlugins( - IndexStorePlugin.class - ).map(IndexStorePlugin::getIndexFoldersDeletionListeners).flatMap(List::stream).toList(); - - final Map<String, IndexStorePlugin.SnapshotCommitSupplier> snapshotCommitSuppliers = pluginsService.filterPlugins( - IndexStorePlugin.class - ) - .map(IndexStorePlugin::getSnapshotCommitSuppliers) - .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - FeatureService featureService = new FeatureService(pluginsService.loadServiceProviders(FeatureSpecification.class)); if (DiscoveryNode.isMasterNode(settings)) { @@ -734,33 +703,27 @@ private void construct( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); - final IndicesService indicesService = new IndicesService( - settings, - pluginsService, - nodeEnvironment, - xContentRegistry, - analysisRegistry, - clusterModule.getIndexNameExpressionResolver(), - indicesModule.getMapperRegistry(), - namedWriteableRegistry, - threadPool, - settingsModule.getIndexScopedSettings(), - circuitBreakerService, - bigArrays, - scriptService, - clusterService, - client, - featureService, - metaStateService, - engineFactoryProviders, - indexStoreFactories, - searchModule.getValuesSourceRegistry(), - recoveryStateFactories, - indexFoldersDeletionListeners, - snapshotCommitSuppliers, - searchModule.getRequestCacheKeyDifferentiator(), - documentParsingObserverSupplier - ); + IndicesService indicesService = new IndicesServiceBuilder().settings(settings) + .pluginsService(pluginsService) + .nodeEnvironment(nodeEnvironment) + .xContentRegistry(xContentRegistry) + .analysisRegistry(analysisRegistry) + .indexNameExpressionResolver(clusterModule.getIndexNameExpressionResolver()) + .mapperRegistry(indicesModule.getMapperRegistry()) + .namedWriteableRegistry(namedWriteableRegistry) + .threadPool(threadPool) + .indexScopedSettings(settingsModule.getIndexScopedSettings()) + .circuitBreakerService(circuitBreakerService) + .bigArrays(bigArrays) + .scriptService(scriptService) + .clusterService(clusterService) + .client(client) + .featureService(featureService) + .metaStateService(metaStateService)
+ .valuesSourceRegistry(searchModule.getValuesSourceRegistry()) + .requestCacheKeyDifferentiator(searchModule.getRequestCacheKeyDifferentiator()) + .documentParsingObserverSupplier(documentParsingObserverSupplier) + .build(); final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index c4590cf52c845..32347047813a3 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1583,14 +1583,6 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set<Strin - public void canMatch(ShardSearchRequest request, ActionListener<CanMatchShardResponse> listener) { - try { - listener.onResponse(canMatch(request)); - } catch (IOException e) { - listener.onFailure(e); - } - } - public void canMatch(CanMatchNodeRequest request, ActionListener<CanMatchNodeResponse> listener) { final List<ShardSearchRequest> shardSearchRequests = request.createShardSearchRequests(); final List<CanMatchShardResponse> responses = new ArrayList<>(shardSearchRequests.size()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java index 18b1f44ce5d7f..77cb482edd8b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java @@ -98,7 +98,6 @@ int getSize() { @Override public InternalAggregation reduce(List<InternalAggregation> aggregations, AggregationReduceContext reduceContext) { - final SearchHits[] shardHits = new SearchHits[aggregations.size()]; final int from; final int size; if (reduceContext.isFinalReduce()) { @@ -113,65 +112,66 @@ public InternalAggregation reduce(List<InternalAggregation> aggregations, Aggreg final TopDocs reducedTopDocs; final TopDocs[] shardDocs; - - if (topDocs.topDocs instanceof TopFieldDocs) { - Sort sort = new Sort(((TopFieldDocs) topDocs.topDocs).fields); + final float maxScore; + if (topDocs.topDocs instanceof TopFieldDocs topFieldDocs) { shardDocs = new TopFieldDocs[aggregations.size()]; - for (int i = 0; i < shardDocs.length; i++) { - InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs.topDocs; - shardHits[i] = topHitsAgg.searchHits; - for (ScoreDoc doc : shardDocs[i].scoreDocs) { - doc.shardIndex = i; - } - } - reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs); + maxScore = reduceAndFindMaxScore(aggregations, shardDocs); + reducedTopDocs = TopDocs.merge(new Sort(topFieldDocs.fields), from, size, (TopFieldDocs[]) shardDocs); } else { shardDocs = new TopDocs[aggregations.size()]; - for (int i = 0; i < shardDocs.length; i++) { - InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs.topDocs; - shardHits[i] = topHitsAgg.searchHits; - for (ScoreDoc doc : shardDocs[i].scoreDocs) { - doc.shardIndex = i; - } - } + maxScore = reduceAndFindMaxScore(aggregations, shardDocs); reducedTopDocs = TopDocs.merge(from, size, shardDocs); } - - float maxScore = Float.NaN; - for (InternalAggregation agg : aggregations) { - InternalTopHits topHitsAgg = (InternalTopHits) agg; - if (Float.isNaN(topHitsAgg.topDocs.maxScore) == false) { - if (Float.isNaN(maxScore)) { - maxScore = topHitsAgg.topDocs.maxScore; - } else { - maxScore =
Math.max(maxScore, topHitsAgg.topDocs.maxScore); - } - } - } - - final int[] tracker = new int[shardHits.length]; - SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length]; - for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) { - ScoreDoc scoreDoc = reducedTopDocs.scoreDocs[i]; - int position; - do { - position = tracker[scoreDoc.shardIndex]++; - } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc); - hits[i] = shardHits[scoreDoc.shardIndex].getAt(position); - } assert reducedTopDocs.totalHits.relation == Relation.EQUAL_TO; + return new InternalTopHits( name, this.from, this.size, new TopDocsAndMaxScore(reducedTopDocs, maxScore), - new SearchHits(hits, reducedTopDocs.totalHits, maxScore), + extractSearchHits(aggregations, reducedTopDocs, shardDocs, maxScore), getMetadata() ); } + private static SearchHits extractSearchHits( + List<InternalAggregation> aggregations, + TopDocs reducedTopDocs, + TopDocs[] shardDocs, + float maxScore + ) { + final int[] tracker = new int[aggregations.size()]; + ScoreDoc[] scoreDocs = reducedTopDocs.scoreDocs; + SearchHit[] hits = new SearchHit[scoreDocs.length]; + for (int i = 0; i < scoreDocs.length; i++) { + ScoreDoc scoreDoc = scoreDocs[i]; + int shardIndex = scoreDoc.shardIndex; + TopDocs topDocsForShard = shardDocs[shardIndex]; + int position; + do { + position = tracker[shardIndex]++; + } while (topDocsForShard.scoreDocs[position] != scoreDoc); + hits[i] = ((InternalTopHits) aggregations.get(shardIndex)).searchHits.getAt(position); + } + return new SearchHits(hits, reducedTopDocs.totalHits, maxScore); + } + + private static float reduceAndFindMaxScore(List<InternalAggregation> aggregations, TopDocs[] shardDocs) { + float maxScore = Float.NaN; + for (int i = 0; i < shardDocs.length; i++) { + InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); + shardDocs[i] = topHitsAgg.topDocs.topDocs; + for (ScoreDoc doc : shardDocs[i].scoreDocs) { + doc.shardIndex = i; + } + final float max = topHitsAgg.topDocs.maxScore; + if (Float.isNaN(max) == false) { + maxScore = Float.isNaN(maxScore) ?
max : Math.max(maxScore, max); + } + } + return maxScore; + } + @Override public InternalAggregation finalizeSampling(SamplingContext samplingContext) { return this; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java index 4777b0eb357da..b731fd79c82fe 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java @@ -8,17 +8,17 @@ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.test.ESTestCase; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; @@ -32,9 +32,16 @@ import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.RESTORE_FROM_SNAPSHOT_ACTION_GUIDE; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.TIER_CAPACITY_ACTION_GUIDE; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class ShardsAvailabilityActionGuideTests extends ESTestCase { + private final ShardsAvailabilityHealthIndicatorService service = new ShardsAvailabilityHealthIndicatorService( + mock(ClusterService.class), + mock(AllocationService.class), + mock(SystemIndices.class) + ); + public void testRestoreFromSnapshotAction() { assertThat(ACTION_RESTORE_FROM_SNAPSHOT.helpURL(), is(RESTORE_FROM_SNAPSHOT_ACTION_GUIDE)); } 
@@ -60,20 +67,17 @@ public void testEnableClusterRoutingAllocation() { } public void testEnableDataTiers() { - assertThat(ACTION_ENABLE_TIERS_LOOKUP.get(DataTier.DATA_HOT).helpURL(), is(ENABLE_TIER_ACTION_GUIDE)); + assertThat(service.getAddNodesWithRoleAction(DataTier.DATA_HOT).helpURL(), is(ENABLE_TIER_ACTION_GUIDE)); } public void testIncreaseShardLimitIndexSettingInTier() { - assertThat( - ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(DataTier.DATA_HOT).helpURL(), - is(INCREASE_SHARD_LIMIT_ACTION_GUIDE) - ); + assertThat(service.getIncreaseShardLimitIndexSettingAction(DataTier.DATA_HOT).helpURL(), is(INCREASE_SHARD_LIMIT_ACTION_GUIDE)); assertThat(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING.helpURL(), is(INCREASE_SHARD_LIMIT_ACTION_GUIDE)); } public void testIncreaseShardLimitClusterSettingInTier() { assertThat( - ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(DataTier.DATA_HOT).helpURL(), + service.getIncreaseShardLimitClusterSettingAction(DataTier.DATA_HOT).helpURL(), is(INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE) ); assertThat(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING.helpURL(), is(INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java index 14402d1e571ec..bb7523661a0fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -85,13 +85,9 @@ import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_TIER_CAPACITY_LOOKUP; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA_LOOKUP; import static 
org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; @@ -1115,7 +1111,7 @@ public void testDiagnoseEnableDataTiers() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is not allowed due to data tier filter @@ -1125,11 +1121,12 @@ public void testDiagnoseEnableDataTiers() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_ENABLE_TIERS_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getAddNodesWithRoleAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseShardLimitIndexSettingInTier() { @@ -1173,7 +1170,7 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInTier() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1184,11 +1181,12 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInTier() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getIncreaseShardLimitIndexSettingAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseShardLimitClusterSettingInTier() { @@ -1237,7 +1235,7 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInTier() { ); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1248,11 +1246,12 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInTier() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getIncreaseShardLimitClusterSettingAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseShardLimitIndexSettingInGeneral() { @@ -1296,7 +1295,7 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInGeneral() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1307,7 +1306,8 @@ public void testDiagnoseIncreaseShardLimitIndexSettingInGeneral() { 1 ) ), - clusterState + clusterState, + null ); assertThat(actions, hasSize(1)); @@ -1360,7 +1360,7 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInGeneral() { ); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( new NodeAllocationResult( @@ -1371,7 +1371,8 @@ public void testDiagnoseIncreaseShardLimitClusterSettingInGeneral() { 1 ) ), - clusterState
+ clusterState, + null ); assertThat(actions, hasSize(1)); @@ -1395,7 +1396,7 @@ public void testDiagnoseMigrateDataRequiredToDataTiers() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because of allocation filters @@ -1407,7 +1408,8 @@ public void testDiagnoseMigrateDataRequiredToDataTiers() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); @@ -1431,7 +1433,7 @@ public void testDiagnoseMigrateDataIncludedToDataTiers() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because of allocation filters @@ -1443,7 +1445,8 @@ public void testDiagnoseMigrateDataIncludedToDataTiers() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); @@ -1466,7 +1469,7 @@ public void testDiagnoseOtherFilteringIssue() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because of allocation filters @@ -1478,7 +1481,8 @@ public void testDiagnoseOtherFilteringIssue() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); // checkDataTierRelatedIssues will leave list empty. Diagnosis methods upstream will add "Check allocation explain" action. @@ -1501,7 +1505,7 @@ public void testDiagnoseIncreaseTierCapacity() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because node is already hosting a copy of it. @@ -1517,11 +1521,12 @@ public void testDiagnoseIncreaseTierCapacity() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); - assertThat(actions, contains(ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(DataTier.DATA_HOT))); + assertThat(actions, contains(service.getIncreaseNodeWithRoleCapacityAction(DataTier.DATA_HOT))); } public void testDiagnoseIncreaseNodeCapacity() { @@ -1540,7 +1545,7 @@ public void testDiagnoseIncreaseNodeCapacity() { var service = createShardsAvailabilityIndicatorService(); // Get the list of user actions that are generated for this unassigned index shard - List<Diagnosis.Definition> actions = service.checkDataTierRelatedIssues( + List<Diagnosis.Definition> actions = service.checkNodeRoleRelatedIssues( indexMetadata, List.of( // Shard is allowed on data tier, but disallowed because node is already hosting a copy of it.
@@ -1556,7 +1561,8 @@ public void testDiagnoseIncreaseNodeCapacity() { 1 ) ), - ClusterState.EMPTY_STATE + ClusterState.EMPTY_STATE, + null ); assertThat(actions, hasSize(1)); @@ -1874,17 +1880,22 @@ public void testMappedFieldsForTelemetry() { DIAGNOSIS_WAIT_FOR_INITIALIZATION.getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:initializing_shards") ); + var service = new ShardsAvailabilityHealthIndicatorService( + mock(ClusterService.class), + mock(AllocationService.class), + mock(SystemIndices.class) + ); for (String tier : List.of("data_content", "data_hot", "data_warm", "data_cold", "data_frozen")) { assertThat( - ACTION_ENABLE_TIERS_LOOKUP.get(tier).getUniqueId(), + service.getAddNodesWithRoleAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:enable_data_tiers:tier:" + tier) ); assertThat( - ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP.get(tier).getUniqueId(), + service.getIncreaseShardLimitIndexSettingAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:increase_shard_limit_index_setting:tier:" + tier) ); assertThat( - ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP.get(tier).getUniqueId(), + service.getIncreaseShardLimitClusterSettingAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:increase_shard_limit_cluster_setting:tier:" + tier) ); assertThat( @@ -1896,7 +1907,7 @@ public void testMappedFieldsForTelemetry() { equalTo("elasticsearch:health:shards_availability:diagnosis:migrate_data_tiers_include_data:tier:" + tier) ); assertThat( - ACTION_INCREASE_TIER_CAPACITY_LOOKUP.get(tier).getUniqueId(), + service.getIncreaseNodeWithRoleCapacityAction(tier).getUniqueId(), equalTo("elasticsearch:health:shards_availability:diagnosis:increase_tier_capacity_for_allocations:tier:" + tier) ); } diff --git a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java index 9da1eb9c553a8..26d880d0a5d8e 100644 --- a/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java +++ b/server/src/test/java/org/elasticsearch/features/FeatureServiceTests.java @@ -75,7 +75,7 @@ public void testFailsDuplicateFeatures() { public void testFailsNonHistoricalVersion() { FeatureSpecification fs = new TestFeatureSpecification( Set.of(), - Map.of(new NodeFeature("f1"), FeatureService.CLUSTER_FEATURES_ADDED_VERSION) + Map.of(new NodeFeature("f1"), Version.fromId(FeatureService.CLUSTER_FEATURES_ADDED_VERSION.id + 1)) ); assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java index c6ff7776b3526..c1d9e1dc0fd17 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java @@ -20,6 +20,9 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -78,6 
+81,52 @@ public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception { dir.close(); } + public void testGlobalOrdinalsCircuitBreaker() throws Exception { + Directory dir = newDirectory(); + IndexWriterConfig iwc = new IndexWriterConfig(null); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + IndexWriter iw = new IndexWriter(dir, iwc); + long numDocs = randomIntBetween(66000, 70000); + + for (int i = 1; i <= numDocs; i++) { + Document doc = new Document(); + doc.add(new SortedSetDocValuesField("field1", new BytesRef(String.valueOf(i)))); + iw.addDocument(doc); + if (i % 10000 == 0) { + iw.commit(); + } + } + iw.close(); + DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0)); + + int[] timesCalled = new int[1]; + SortedSetOrdinalsIndexFieldData sortedSetOrdinalsIndexFieldData = new SortedSetOrdinalsIndexFieldData( + new DummyAccountingFieldDataCache(), + "field1", + CoreValuesSourceType.KEYWORD, + new NoneCircuitBreakerService() { + @Override + public CircuitBreaker getBreaker(String name) { + assertThat(name, equalTo(CircuitBreaker.FIELDDATA)); + return new NoopCircuitBreaker("test") { + @Override + public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { + assertThat(label, equalTo("Global Ordinals")); + assertThat(bytes, equalTo(0L)); + timesCalled[0]++; + } + }; + } + }, + MOCK_TO_SCRIPT_FIELD + ); + sortedSetOrdinalsIndexFieldData.loadGlobal(ir); + assertThat(timesCalled[0], equalTo(2)); + + ir.close(); + dir.close(); + } + private SortedSetOrdinalsIndexFieldData createSortedDV(String fieldName, IndexFieldDataCache indexFieldDataCache) { return new SortedSetOrdinalsIndexFieldData( indexFieldDataCache, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java new file mode 100644 index 0000000000000..00e60f6e2cba1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; + +public class BlockSourceReaderTests extends ESTestCase { + public void testSingle() throws IOException { + withIndex( + source -> source.field("field", "foo"), + ctx -> loadBlock(ctx, block -> assertThat(block.get(0), equalTo(new BytesRef("foo")))) + ); + } + + public void testMissing() throws IOException { + withIndex(source -> {}, ctx -> loadBlock(ctx, block -> assertThat(block.get(0), nullValue()))); + } + + public void testArray() throws IOException { + withIndex( + source -> source.startArray("field").value("foo").value("bar").endArray(), + ctx -> loadBlock(ctx, block -> assertThat(block.get(0), equalTo(List.of(new BytesRef("foo"), new BytesRef("bar"))))) + ); + } + + public void testEmptyArray() throws IOException { + withIndex(source -> source.startArray("field").endArray(), ctx -> loadBlock(ctx, block -> assertThat(block.get(0), nullValue()))); + } + + private void loadBlock(LeafReaderContext ctx, Consumer<TestBlock> test) throws IOException { + BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(SourceValueFetcher.toString(Set.of("field"))); + assertThat(loader.columnAtATimeReader(ctx), nullValue()); + BlockLoader.RowStrideReader reader = loader.rowStrideReader(ctx); + assertThat(loader.rowStrideStoredFieldSpec(), equalTo(StoredFieldsSpec.NEEDS_SOURCE)); + BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), + loader.rowStrideStoredFieldSpec().requiresSource() ?
SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null + ); + BlockLoader.Builder builder = loader.builder(TestBlock.FACTORY, 1); + storedFields.advanceTo(0); + reader.read(0, storedFields, builder); + TestBlock block = (TestBlock) builder.build(); + assertThat(block.size(), equalTo(1)); + test.accept(block); + } + + private void withIndex(CheckedConsumer<XContentBuilder, IOException> buildSource, CheckedConsumer<LeafReaderContext, IOException> test) + throws IOException { + try ( + Directory directory = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter( + random(), + directory, + newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + XContentBuilder source = JsonXContent.contentBuilder(); + source.startObject(); + buildSource.accept(source); + source.endObject(); + writer.addDocument(List.of(new StoredField(SourceFieldMapper.NAME, BytesReference.bytes(source).toBytesRef()))); + try (IndexReader reader = writer.getReader()) { + assertThat(reader.leaves(), hasSize(1)); + test.accept(reader.leaves().get(0)); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 1a9982f780a04..4618b9d5bd1ae 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -36,7 +36,6 @@ import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -1106,7 +1105,7 @@ public void testCanRewriteToMatchNone() { ); } - public void testSetSearchThrottled() { + public void testSetSearchThrottled() throws IOException { createIndex("throttled_threadpool_index"); client().execute( InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE, @@ -1144,21 +1143,6 @@ public void testSetSearchThrottled() { ); assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch", iae.getMessage()); assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled()); - SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); - ShardSearchRequest req = new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - new ShardId(index, 0), - 0, - 1, - AliasFilter.EMPTY, - 1f, - -1, - null - ); - Thread currentThread = Thread.currentThread(); - // we still make sure can match is executed on the network thread - service.canMatch(req, ActionTestUtils.assertNoFailureListener(r -> assertSame(Thread.currentThread(), currentThread))); } public void testAggContextGetsMatchAll() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index e87184a38a776..f4898e1fe2c8c 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -150,6 +150,7 @@ import org.elasticsearch.indices.IndicesFeatures; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.indices.IndicesServiceBuilder;
import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -1756,44 +1757,39 @@ protected void assertSnapshotOrGenericThread() { IndexScopedSettings.BUILT_IN_INDEX_SETTINGS ); final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - indicesService = new IndicesService( - settings, - mock(PluginsService.class), - nodeEnv, - namedXContentRegistry, - new AnalysisRegistry( - environment, - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap() - ), - indexNameExpressionResolver, - mapperRegistry, - namedWriteableRegistry, - threadPool, - indexScopedSettings, - new NoneCircuitBreakerService(), - bigArrays, - scriptService, - clusterService, - client, - new FeatureService(List.of(new IndicesFeatures())), - new MetaStateService(nodeEnv, namedXContentRegistry), - Collections.emptyList(), - emptyMap(), - null, - emptyMap(), - List.of(), - emptyMap(), - null, - () -> DocumentParsingObserver.EMPTY_INSTANCE - ); + + indicesService = new IndicesServiceBuilder().settings(settings) + .pluginsService(mock(PluginsService.class)) + .nodeEnvironment(nodeEnv) + .xContentRegistry(namedXContentRegistry) + .analysisRegistry( + new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ) + ) + .indexNameExpressionResolver(indexNameExpressionResolver) + .mapperRegistry(mapperRegistry) + .namedWriteableRegistry(namedWriteableRegistry) + .threadPool(threadPool) + .indexScopedSettings(indexScopedSettings) + .circuitBreakerService(new NoneCircuitBreakerService()) + .bigArrays(bigArrays) + .scriptService(scriptService) + .clusterService(clusterService) + .client(client) + .featureService(new FeatureService(List.of(new IndicesFeatures()))) + .metaStateService(new MetaStateService(nodeEnv, namedXContentRegistry)) + .documentParsingObserverSupplier(() -> DocumentParsingObserver.EMPTY_INSTANCE) + .build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( settings, diff --git a/test/fixtures/minio-fixture/build.gradle b/test/fixtures/minio-fixture/build.gradle index 0823482331e84..8673c51d46038 100644 --- a/test/fixtures/minio-fixture/build.gradle +++ b/test/fixtures/minio-fixture/build.gradle @@ -6,6 +6,27 @@ * Side Public License, v 1. 
*/ apply plugin: 'elasticsearch.test.fixtures' +apply plugin: 'java' +apply plugin: 'elasticsearch.java' description = 'Fixture for MinIO Storage service' +configurations.all { + transitive = false +} + +dependencies { + testImplementation project(':test:framework') + + api "junit:junit:${versions.junit}" + api "org.testcontainers:testcontainers:${versions.testcontainer}" + implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + implementation "org.slf4j:slf4j-api:${versions.slf4j}" + implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" + runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" + runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" +} diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java new file mode 100644 index 0000000000000..f9e75b5d85750 --- /dev/null +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/minio/MinioTestContainer.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.minio; + +import org.junit.rules.TestRule; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.images.builder.ImageFromDockerfile; + +public class MinioTestContainer extends GenericContainer<MinioTestContainer> implements TestRule { + + private static final int servicePort = 9000; + private final boolean enabled; + + public MinioTestContainer() { + this(true); + } + + public MinioTestContainer(boolean enabled) { + super( + new ImageFromDockerfile().withDockerfileFromBuilder( + builder -> builder.from("minio/minio:RELEASE.2021-03-01T04-20-55Z") + .env("MINIO_ACCESS_KEY", "s3_test_access_key") + .env("MINIO_SECRET_KEY", "s3_test_secret_key") + .run("mkdir -p /minio/data/bucket") + .cmd("server", "/minio/data") + .build() + ) + ); + if (enabled) { + addExposedPort(servicePort); + } + this.enabled = enabled; + } + + @Override + public void start() { + if (enabled) { + super.start(); + } + } + + public String getAddress() { + return "http://127.0.0.1:" + getMappedPort(servicePort); + } +} diff --git a/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java new file mode 100644 index 0000000000000..6a85d896b87d5 --- /dev/null +++ b/test/fixtures/minio-fixture/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.fixtures.testcontainers; + +import com.carrotsearch.randomizedtesting.ThreadFilter; + +public class TestContainersThreadFilter implements ThreadFilter { + @Override + public boolean reject(Thread t) { + return t.getName().startsWith("testcontainers-") || t.getName().startsWith("ducttape"); + } +} diff --git a/test/fixtures/s3-fixture/build.gradle b/test/fixtures/s3-fixture/build.gradle index a46af72de5cab..5e24fc9d24785 100644 --- a/test/fixtures/s3-fixture/build.gradle +++ b/test/fixtures/s3-fixture/build.gradle @@ -13,6 +13,9 @@ tasks.named("test").configure { enabled = false } dependencies { api project(':server') + api("junit:junit:${versions.junit}") { + transitive = false + } testImplementation project(':test:framework') } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java index c8127646f7e79..4a4fb906fdc06 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixture.java @@ -12,22 +12,48 @@ import com.sun.net.httpserver.HttpServer; import org.elasticsearch.rest.RestStatus; +import org.junit.rules.ExternalResource; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Arrays; import java.util.Objects; -public class S3HttpFixture { +public class S3HttpFixture extends ExternalResource { private final HttpServer server; - S3HttpFixture(final String[] args) throws Exception { - this.server = HttpServer.create(new InetSocketAddress(InetAddress.getByName(args[0]), Integer.parseInt(args[1])), 0); - this.server.createContext("/", Objects.requireNonNull(createHandler(args))); + private boolean enabled; + + public S3HttpFixture() { + this(true); + } + + public S3HttpFixture(boolean enabled) { + this(enabled, "bucket", "base_path_integration_tests", "s3_test_access_key"); + } + + public S3HttpFixture(boolean enabled, String... args) { + this(resolveAddress("localhost", 0), args); + this.enabled = enabled; + } + + public S3HttpFixture(final String[] args) throws Exception { + this(resolveAddress(args[0], Integer.parseInt(args[1])), args); } - final void start() throws Exception { + public S3HttpFixture(InetSocketAddress inetSocketAddress, String... 
args) { + try { + this.server = HttpServer.create(inetSocketAddress, 0); + this.server.createContext("/", Objects.requireNonNull(createHandler(args))); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + final void startWithWait() throws Exception { try { server.start(); // wait to be killed @@ -37,10 +63,14 @@ final void start() throws Exception { } } + public final void start() throws Exception { + server.start(); + } + protected HttpHandler createHandler(final String[] args) { - final String bucket = Objects.requireNonNull(args[2]); - final String basePath = args[3]; - final String accessKey = Objects.requireNonNull(args[4]); + final String bucket = Objects.requireNonNull(args[0]); + final String basePath = args[1]; + final String accessKey = Objects.requireNonNull(args[2]); return new S3HttpHandler(bucket, basePath) { @Override @@ -59,7 +89,37 @@ public static void main(final String[] args) throws Exception { if (args == null || args.length < 5) { throw new IllegalArgumentException("S3HttpFixture expects 5 arguments [address, port, bucket, base path, access key]"); } - final S3HttpFixture fixture = new S3HttpFixture(args); - fixture.start(); + InetSocketAddress inetSocketAddress = resolveAddress(args[0], Integer.parseInt(args[1])); + final S3HttpFixture fixture = new S3HttpFixture(inetSocketAddress, Arrays.copyOfRange(args, 2, args.length)); + fixture.startWithWait(); + } + + public String getAddress() { + return "http://" + server.getAddress().getHostString() + ":" + server.getAddress().getPort(); + } + + public void stop(int delay) { + server.stop(delay); + } + + protected void before() throws Throwable { + if (enabled) { + start(); + } + } + + @Override + protected void after() { + if (enabled) { + stop(0); + } + } + + private static InetSocketAddress resolveAddress(String address, int port) { + try { + return new InetSocketAddress(InetAddress.getByName(address), port); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java index 05b931817fea4..3a53492bad7ec 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEC2.java @@ -11,6 +11,7 @@ import org.elasticsearch.rest.RestStatus; +import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -22,14 +23,30 @@ public class S3HttpFixtureWithEC2 extends S3HttpFixtureWithSessionToken { private static final String EC2_PATH = "/latest/meta-data/iam/security-credentials/"; private static final String EC2_PROFILE = "ec2Profile"; - S3HttpFixtureWithEC2(final String[] args) throws Exception { + public S3HttpFixtureWithEC2(final String[] args) throws Exception { super(args); } + public S3HttpFixtureWithEC2(boolean enabled) { + this(enabled, "ec2_bucket", "ec2_base_path", "ec2_access_key", "ec2_session_token"); + } + + public S3HttpFixtureWithEC2(boolean enabled, String... 
args) { + super(enabled, args); + } + + public S3HttpFixtureWithEC2(InetSocketAddress inetSocketAddress, String[] strings) { + super(inetSocketAddress, strings); + } + + public S3HttpFixtureWithEC2() { + this(true); + } + @Override protected HttpHandler createHandler(final String[] args) { - final String ec2AccessKey = Objects.requireNonNull(args[4]); - final String ec2SessionToken = Objects.requireNonNull(args[5], "session token is missing"); + final String ec2AccessKey = Objects.requireNonNull(args[2]); + final String ec2SessionToken = Objects.requireNonNull(args[3], "session token is missing"); final HttpHandler delegate = super.createHandler(args); return exchange -> { @@ -83,6 +100,6 @@ public static void main(final String[] args) throws Exception { ); } final S3HttpFixtureWithEC2 fixture = new S3HttpFixtureWithEC2(args); - fixture.start(); + fixture.startWithWait(); } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java index 579411207cce0..f10c0d70e7413 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithECS.java @@ -11,19 +11,36 @@ import org.elasticsearch.rest.RestStatus; +import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.Objects; public class S3HttpFixtureWithECS extends S3HttpFixtureWithEC2 { - private S3HttpFixtureWithECS(final String[] args) throws Exception { + public S3HttpFixtureWithECS(final String[] args) throws Exception { super(args); } + public S3HttpFixtureWithECS(boolean enabled) { + this(enabled, "ecs_bucket", "ecs_base_path", "ecs_access_key", "ecs_session_token"); + } + + public S3HttpFixtureWithECS(boolean enabled, String... 
args) { + super(enabled, args); + } + + public S3HttpFixtureWithECS(InetSocketAddress inetSocketAddress, String[] strings) { + super(inetSocketAddress, strings); + } + + public S3HttpFixtureWithECS() { + this(true); + } + @Override protected HttpHandler createHandler(final String[] args) { - final String ecsAccessKey = Objects.requireNonNull(args[4]); - final String ecsSessionToken = Objects.requireNonNull(args[5], "session token is missing"); + final String ecsAccessKey = Objects.requireNonNull(args[2]); + final String ecsSessionToken = Objects.requireNonNull(args[3], "session token is missing"); final HttpHandler delegate = super.createHandler(args); return exchange -> { @@ -47,6 +64,6 @@ public static void main(final String[] args) throws Exception { ); } final S3HttpFixtureWithECS fixture = new S3HttpFixtureWithECS(args); - fixture.start(); + fixture.startWithWait(); } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java index 26b8f17dfd76f..e440f6d0b983e 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSTS.java @@ -11,6 +11,8 @@ import org.elasticsearch.rest.RestStatus; +import java.io.IOException; +import java.net.InetSocketAddress; import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.time.ZonedDateTime; @@ -26,15 +28,38 @@ public class S3HttpFixtureWithSTS extends S3HttpFixture { private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; private static final String ROLE_NAME = "sts-fixture-test"; - private S3HttpFixtureWithSTS(final String[] args) throws Exception { + public S3HttpFixtureWithSTS(boolean enabled) { + this( + enabled, + "sts_bucket", + "sts_base_path", + "sts_access_key", + "sts_session_token", + "Atza|IQEBLjAsAhRFiXuWpUXuRvQ9PZL3GMFcYevydwIUFAHZwXZXXXXXXXXJnrulxKDHwy87oGKPznh0D6bEQZTSCzyoCtL_8S07pLpr0zMbn6w1lfVZKNTBdDansFBmtGnIsIapjI6xKR02Yc_2bQ8LZbUXSGm6Ry6_BG7PrtLZtj_dfCTj92xNGed-CrKqjG7nPBjNIL016GGvuS5gSvPRUxWES3VYfm1wl7WTI7jn-Pcb6M-buCgHhFOzTQxod27L9CqnOLio7N3gZAGpsp6n1-AJBOCJckcyXe2c6uD0srOJeZlKUm2eTDVMf8IehDVI0r1QOnTV6KzzAI3OY87Vd_cVMQ" + ); + } + + public S3HttpFixtureWithSTS(boolean enabled, String... 
args) { + super(enabled, args); + } + + public S3HttpFixtureWithSTS(final String[] args) throws Exception { super(args); } + public S3HttpFixtureWithSTS(InetSocketAddress inetSocketAddress, String[] args) throws IOException { + super(inetSocketAddress, args); + } + + public S3HttpFixtureWithSTS() { + this(true); + } + @Override protected HttpHandler createHandler(final String[] args) { - String accessKey = Objects.requireNonNull(args[4]); - String sessionToken = Objects.requireNonNull(args[5], "session token is missing"); - String webIdentityToken = Objects.requireNonNull(args[6], "web identity token is missing"); + String accessKey = Objects.requireNonNull(args[2]); + String sessionToken = Objects.requireNonNull(args[3], "session token is missing"); + String webIdentityToken = Objects.requireNonNull(args[4], "web identity token is missing"); final HttpHandler delegate = super.createHandler(args); return exchange -> { @@ -106,6 +131,6 @@ public static void main(final String[] args) throws Exception { ); } final S3HttpFixtureWithSTS fixture = new S3HttpFixtureWithSTS(args); - fixture.start(); + fixture.startWithWait(); } } diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java index f514ccd66b555..713998af052d9 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithSessionToken.java @@ -11,6 +11,7 @@ import org.elasticsearch.rest.RestStatus; +import java.net.InetSocketAddress; import java.util.Objects; import static fixture.s3.S3HttpHandler.sendError; @@ -21,9 +22,25 @@ public class S3HttpFixtureWithSessionToken extends S3HttpFixture { super(args); } + public S3HttpFixtureWithSessionToken(boolean enabled) { + this(enabled, "session_token_bucket", "session_token_base_path_integration_tests", "session_token_access_key", "session_token"); + } + + public S3HttpFixtureWithSessionToken(boolean enabled, String... 
args) { + super(enabled, args); + } + + public S3HttpFixtureWithSessionToken(InetSocketAddress inetSocketAddress, String[] args) { + super(inetSocketAddress, args); + } + + public S3HttpFixtureWithSessionToken() { + this(true); + } + @Override protected HttpHandler createHandler(final String[] args) { - final String sessionToken = Objects.requireNonNull(args[5], "session token is missing"); + final String sessionToken = Objects.requireNonNull(args[3], "session token is missing"); final HttpHandler delegate = super.createHandler(args); return exchange -> { final String securityToken = exchange.getRequestHeaders().getFirst("x-amz-security-token"); @@ -46,6 +63,6 @@ public static void main(final String[] args) throws Exception { ); } final S3HttpFixtureWithSessionToken fixture = new S3HttpFixtureWithSessionToken(args); - fixture.start(); + fixture.startWithWait(); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index d68324ff902e2..7bc4ed62eef84 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -1300,7 +1300,7 @@ public Set sourcePaths(String name) { } else { BlockLoaderStoredFieldsFromLeafLoader storedFieldsLoader = new BlockLoaderStoredFieldsFromLeafLoader( StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), - loader.rowStrideStoredFieldSpec().requiresSource() + loader.rowStrideStoredFieldSpec().requiresSource() ? SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null ); storedFieldsLoader.advanceTo(0); BlockLoader.Builder builder = loader.builder(TestBlock.FACTORY, 1); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 3327137cef7b7..5c1bab55d13a1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -52,6 +52,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.index.IndexSettings; @@ -100,6 +101,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.TimeUnit; @@ -137,7 +139,7 @@ public abstract class ESRestTestCase extends ESTestCase { public static final String CLIENT_SOCKET_TIMEOUT = "client.socket.timeout"; public static final String CLIENT_PATH_PREFIX = "client.path.prefix"; - private static Map historicalFeatures; + private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*"); /** * Convert the entity from a {@link Response} into a map of maps. 
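For context on the SEMANTIC_VERSION_PATTERN introduced above: it accepts a leading major.minor.patch and tolerates any non-digit suffix, so strings like "8.12.0-SNAPSHOT" parse while serverless/non-semantic version strings fall through to an empty Optional. A minimal standalone sketch of that behaviour (the class and method names here are illustrative, not part of the change):

import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Mirrors the lenient version parsing added to ESRestTestCase above.
class LegacyVersionParseSketch {
    private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*");

    static Optional<String> parseLegacyVersion(String version) {
        Matcher matcher = SEMANTIC_VERSION_PATTERN.matcher(version);
        return matcher.matches() ? Optional.of(matcher.group(1)) : Optional.empty();
    }

    public static void main(String[] args) {
        System.out.println(parseLegacyVersion("8.12.0-SNAPSHOT")); // Optional[8.12.0]
        System.out.println(parseLegacyVersion("serverless"));      // Optional.empty
    }
}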
@@ -209,6 +211,15 @@ public enum ProductFeature { private static EnumSet availableFeatures; private static TreeSet nodeVersions; + private static TestFeatureService testFeatureService; + + protected final boolean clusterHasFeature(String featureId) { + return testFeatureService.clusterHasFeature(featureId); + } + + protected boolean clusterHasFeature(NodeFeature feature) { + return testFeatureService.clusterHasFeature(feature.id()); + } @Before public void initClient() throws IOException { @@ -217,6 +228,7 @@ public void initClient() throws IOException { assert clusterHosts == null; assert availableFeatures == null; assert nodeVersions == null; + assert testFeatureService == null; clusterHosts = parseClusterHosts(getTestRestCluster()); logger.info("initializing REST clients against {}", clusterHosts); client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); @@ -229,7 +241,8 @@ public void initClient() throws IOException { Map nodes = (Map) response.get("nodes"); for (Map.Entry node : nodes.entrySet()) { Map nodeInfo = (Map) node.getValue(); - nodeVersions.add(Version.fromString(nodeInfo.get("version").toString())); + // TODO (ES-7316): change this for serverless/non-semantic (change to string or remove if not needed) + nodeVersions.add(parseLegacyVersion(nodeInfo.get("version").toString()).get()); for (Object module : (List) nodeInfo.get("modules")) { Map moduleInfo = (Map) module; final String moduleName = moduleInfo.get("name").toString(); @@ -268,7 +281,15 @@ public void initClient() throws IOException { ); } } + + testFeatureService = new TestFeatureService( + List.of(new RestTestLegacyFeatures()), // TODO (ES-7313): add new ESRestTestCaseHistoricalFeatures() too + nodeVersions, + Set.of() + ); // TODO (ES-7316): GET and pass cluster state } + + assert testFeatureService != null; assert client != null; assert adminClient != null; assert clusterHosts != null; @@ -449,6 +470,7 @@ public static void closeClients() throws IOException { adminClient = null; availableFeatures = null; nodeVersions = null; + testFeatureService = null; } } @@ -576,8 +598,7 @@ protected boolean preserveTemplatesUponCompletion() { protected boolean resetFeatureStates() { try { final Version minimumNodeVersion = minimumNodeVersion(); - // Reset feature state API was introduced in 7.13.0 - if (minimumNodeVersion.before(Version.V_7_13_0)) { + if (clusterHasFeature(RestTestLegacyFeatures.FEATURE_STATE_RESET_SUPPORTED) == false) { return false; } @@ -2073,7 +2094,9 @@ protected static IndexVersion minimumIndexVersion() throws IOException { // fallback on version if index version is not there IndexVersion indexVersion = versionStr != null ? 
IndexVersion.fromId(Integer.parseInt(versionStr)) - : IndexVersion.fromId(Version.fromString((String) nodeData.get("version")).id); + : IndexVersion.fromId( + parseLegacyVersion((String) nodeData.get("version")).map(Version::id).orElse(IndexVersions.MINIMUM_COMPATIBLE.id()) + ); if (minVersion == null || minVersion.after(indexVersion)) { minVersion = indexVersion; } @@ -2082,6 +2105,14 @@ protected static IndexVersion minimumIndexVersion() throws IOException { return minVersion; } + private static Optional parseLegacyVersion(String version) { + var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); + if (semanticVersionMatcher.matches()) { + return Optional.of(Version.fromString(semanticVersionMatcher.group(1))); + } + return Optional.empty(); + } + @SuppressWarnings("unchecked") private static void ensureGlobalCheckpointSynced(String index) throws Exception { assertBusy(() -> { @@ -2218,32 +2249,39 @@ private static boolean isMlEnabled() { } } - protected Map getHistoricalFeatures() { - if (historicalFeatures == null) { - Map historicalFeaturesMap = new HashMap<>(); - String metadataPath = System.getProperty("tests.features.metadata.path"); - if (metadataPath == null) { - throw new UnsupportedOperationException("Historical features information is unavailable when using legacy test plugins."); - } + private static class ESRestTestCaseHistoricalFeatures implements FeatureSpecification { + private static Map historicalFeatures; + + @Override + public Map getHistoricalFeatures() { + if (historicalFeatures == null) { + Map historicalFeaturesMap = new HashMap<>(); + String metadataPath = System.getProperty("tests.features.metadata.path"); + if (metadataPath == null) { + throw new UnsupportedOperationException( + "Historical features information is unavailable when using legacy test plugins." + ); + } - String[] metadataFiles = metadataPath.split(System.getProperty("path.separator")); - for (String metadataFile : metadataFiles) { - try ( - InputStream in = Files.newInputStream(PathUtils.get(metadataFile)); - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, in) - ) { - for (Map.Entry entry : parser.mapStrings().entrySet()) { - historicalFeaturesMap.put(new NodeFeature(entry.getKey()), Version.fromString(entry.getValue())); + String[] metadataFiles = metadataPath.split(System.getProperty("path.separator")); + for (String metadataFile : metadataFiles) { + try ( + InputStream in = Files.newInputStream(PathUtils.get(metadataFile)); + XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, in) + ) { + for (Map.Entry entry : parser.mapStrings().entrySet()) { + historicalFeaturesMap.put(new NodeFeature(entry.getKey()), Version.fromString(entry.getValue())); + } + } catch (IOException e) { + throw new UncheckedIOException(e); } - } catch (IOException e) { - throw new UncheckedIOException(e); } + + historicalFeatures = Collections.unmodifiableMap(historicalFeaturesMap); } - historicalFeatures = Collections.unmodifiableMap(historicalFeaturesMap); + return historicalFeatures; } - - return historicalFeatures; } public static void setIgnoredErrorResponseCodes(Request request, RestStatus... 
restStatuses) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java new file mode 100644 index 0000000000000..26204f7d9739b --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +/** + * This class groups historical features that have been removed from the production codebase, but are still used by the test + * framework to support BwC tests. Rather than leaving them in the main src we group them here, so it's clear they are not used in + * production code anymore. + */ +public class RestTestLegacyFeatures implements FeatureSpecification { + public static final NodeFeature FEATURE_STATE_RESET_SUPPORTED = new NodeFeature("system_indices.feature_state_reset_supported"); + public static final NodeFeature SYSTEM_INDICES_REST_ACCESS_ENFORCED = new NodeFeature("system_indices.rest_access_enforced"); + + @Override + public Map getHistoricalFeatures() { + return Map.of(FEATURE_STATE_RESET_SUPPORTED, Version.V_7_13_0, SYSTEM_INDICES_REST_ACCESS_ENFORCED, Version.V_8_0_0); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java new file mode 100644 index 0000000000000..adcac27607e0f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + + package org.elasticsearch.test.rest; + + import org.elasticsearch.Version; + import org.elasticsearch.features.FeatureData; + import org.elasticsearch.features.FeatureSpecification; + + import java.util.Collection; + import java.util.List; + import java.util.NavigableMap; + import java.util.Set; + import java.util.function.Predicate; + + class TestFeatureService { + private final Predicate<String> historicalFeaturesPredicate; + private final Set<String> clusterStateFeatures; + + TestFeatureService(List<? extends FeatureSpecification> specs, Collection<Version> nodeVersions, Set<String> clusterStateFeatures) { + + var minNodeVersion = nodeVersions.stream().min(Version::compareTo); + this.historicalFeaturesPredicate = minNodeVersion.<Predicate<String>>map(v -> { + var featureData = FeatureData.createFromSpecifications(specs); + var historicalFeatures = featureData.getHistoricalFeatures(); + return featureId -> hasHistoricalFeature(historicalFeatures, v, featureId); + }).orElse(f -> false); + this.clusterStateFeatures = clusterStateFeatures; + } + + private static boolean hasHistoricalFeature(NavigableMap<Version, Set<String>> historicalFeatures, Version version, String featureId) { + var allHistoricalFeatures = historicalFeatures.lastEntry().getValue(); + assert allHistoricalFeatures != null && allHistoricalFeatures.contains(featureId) : "Unknown historical feature " + featureId; + var features = historicalFeatures.floorEntry(version); + return features != null && features.getValue().contains(featureId); + } + + boolean clusterHasFeature(String featureId) { + if (clusterStateFeatures.contains(featureId)) { + return true; + } + return historicalFeaturesPredicate.test(featureId); + } +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/SystemPropertyProvider.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/SystemPropertyProvider.java new file mode 100644 index 0000000000000..3c2d9f65357f3 --- /dev/null +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/SystemPropertyProvider.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.cluster; + +import org.elasticsearch.test.cluster.local.LocalClusterSpec; + +import java.util.Map; + +/** + * Functional interface for supplying system properties to an Elasticsearch node. This interface is designed to be implemented by tests + * and fixtures wanting to provide system properties to an {@link ElasticsearchCluster} in a dynamic fashion. + * Instances are evaluated lazily at cluster start time. + */ +public interface SystemPropertyProvider { + + /** + * Returns a collection of system properties to apply to an Elasticsearch cluster node. This method is called when the cluster is + * started so implementors can return dynamic system property values that may or may not be based on the given node spec. 
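As a usage sketch of this extension point: a hypothetical provider that feeds a fixture's dynamically bound address to every node at cluster start time. The class name, the property name, and the restored generic signature are assumptions for illustration; only getAddress() and the get(LocalNodeSpec) hook come from this change.

import java.util.Map;

import org.elasticsearch.test.cluster.SystemPropertyProvider;
import org.elasticsearch.test.cluster.local.LocalClusterSpec;

import fixture.s3.S3HttpFixture;

// Hypothetical: publishes the fixture's endpoint as a node system property.
public class S3FixtureAddressProvider implements SystemPropertyProvider {
    private final S3HttpFixture fixture; // assumed to be started elsewhere, e.g. as a @ClassRule

    public S3FixtureAddressProvider(S3HttpFixture fixture) {
        this.fixture = fixture;
    }

    @Override
    public Map<String, String> get(LocalClusterSpec.LocalNodeSpec nodeSpec) {
        // "tests.s3.endpoint" is an illustrative property name, not from the diff
        return Map.of("tests.s3.endpoint", fixture.getAddress());
    }
}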
+ * + * @param nodeSpec the specification for the given node to apply settings to + * @return system property variables to add to the node + */ + Map get(LocalClusterSpec.LocalNodeSpec nodeSpec); +} diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index 72c24d9dcc7ef..78c796ae8dd9c 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -715,9 +715,9 @@ private Map getEnvironmentVariables() { } String systemProperties = ""; - if (spec.getSystemProperties().isEmpty() == false) { - systemProperties = spec.getSystemProperties() - .entrySet() + Map resolvedSystemProperties = new HashMap<>(spec.resolveSystemProperties()); + if (resolvedSystemProperties.isEmpty() == false) { + systemProperties = resolvedSystemProperties.entrySet() .stream() .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue()) .map(p -> p.replace("${ES_PATH_CONF}", configDir.toString())) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java index 78dbb8fb1f591..1b9691842f13a 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterSpecBuilder.java @@ -183,6 +183,7 @@ private LocalNodeSpec build(LocalClusterSpec cluster) { getKeystoreFiles(), getKeystorePassword(), getExtraConfigFiles(), + getSystemPropertyProviders(), getSystemProperties(), getJvmArgs() ); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java index 0cc9d4a360fb8..d8b0d6df5515c 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.cluster.EnvironmentProvider; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; +import org.elasticsearch.test.cluster.SystemPropertyProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.Version; import org.elasticsearch.test.cluster.util.resource.Resource; @@ -38,6 +39,7 @@ public abstract class AbstractLocalSpecBuilder> im private final Map keystoreFiles = new HashMap<>(); private final Map extraConfigFiles = new HashMap<>(); private final Map systemProperties = new HashMap<>(); + private final List systemPropertyProviders = new ArrayList<>(); private final List jvmArgs = new ArrayList<>(); private DistributionType distributionType; private Version version; @@ -204,10 +206,31 @@ public T systemProperty(String property, String value) { return cast(this); } + @Override + public T systemProperty(String key, Supplier supplier) { + this.systemPropertyProviders.add(s -> Map.of(key, supplier.get())); + return cast(this); + } + + public T systemProperty(SystemPropertyProvider 
systemPropertyProvider) { + this.systemPropertyProviders.add(systemPropertyProvider); + return cast(this); + } + + @Override + public T systemProperty(String key, Supplier value, Predicate predicate) { + this.systemPropertyProviders.add(s -> predicate.test(s) ? Map.of(key, value.get()) : Map.of()); + return cast(this); + } + public Map getSystemProperties() { return inherit(() -> parent.getSystemProperties(), systemProperties); } + public List getSystemPropertyProviders() { + return inherit(() -> parent.getSystemPropertyProviders(), systemPropertyProviders); + } + @Override public T jvmArg(String arg) { this.jvmArgs.add(arg); diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java index 4b20afcf1e8b4..6cb92a3436aac 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java @@ -34,4 +34,5 @@ public ElasticsearchCluster build() { ) ); } + } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java index e87f370e2b592..de0d541c8535f 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalClusterSpec.java @@ -12,6 +12,7 @@ import org.elasticsearch.test.cluster.EnvironmentProvider; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; +import org.elasticsearch.test.cluster.SystemPropertyProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.local.model.User; import org.elasticsearch.test.cluster.util.Version; @@ -88,6 +89,7 @@ public static class LocalNodeSpec { private final Map keystoreFiles; private final String keystorePassword; private final Map extraConfigFiles; + private final List systemPropertyProviders; private final Map systemProperties; private final List jvmArgs; private Version version; @@ -109,6 +111,7 @@ public LocalNodeSpec( Map keystoreFiles, String keystorePassword, Map extraConfigFiles, + List systemPropertyProviders, Map systemProperties, List jvmArgs ) { @@ -128,6 +131,7 @@ public LocalNodeSpec( this.keystoreFiles = keystoreFiles; this.keystorePassword = keystorePassword; this.extraConfigFiles = extraConfigFiles; + this.systemPropertyProviders = systemPropertyProviders; this.systemProperties = systemProperties; this.jvmArgs = jvmArgs; } @@ -184,10 +188,6 @@ public Map getExtraConfigFiles() { return extraConfigFiles; } - public Map getSystemProperties() { - return systemProperties; - } - public List getJvmArgs() { return jvmArgs; } @@ -278,6 +278,24 @@ public Map resolveEnvironment() { return resolvedEnvironment; } + /** + * Resolve node system properties. Order of precedence is as follows: + *
+ * <ol>
+ *   <li>SystemProperties from cluster configured {@link SystemPropertyProvider}</li>
+ *   <li>SystemProperties from node configured {@link SystemPropertyProvider}</li>
+ *   <li>SystemProperties from cluster settings</li>
+ *   <li>SystemProperties from node settings</li>
+ * </ol>
+ * + * @return resolved system properties for node + */ + public Map resolveSystemProperties() { + Map resolvedSystemProperties = new HashMap<>(); + systemPropertyProviders.forEach(p -> resolvedSystemProperties.putAll(p.get(this))); + resolvedSystemProperties.putAll(systemProperties); + return resolvedSystemProperties; + } + /** * Returns a new {@link LocalNodeSpec} without the given {@link SettingsProvider}s. This is needed when resolving settings from a * settings provider to avoid infinite recursion. @@ -308,6 +326,7 @@ private LocalNodeSpec getFilteredSpec(SettingsProvider filteredProvider, Setting n.keystoreFiles, n.keystorePassword, n.extraConfigFiles, + n.systemPropertyProviders, n.systemProperties, n.jvmArgs ) diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java index e3b6b98d84755..c18129a7c61a5 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.test.cluster.EnvironmentProvider; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.SettingsProvider; +import org.elasticsearch.test.cluster.SystemPropertyProvider; import org.elasticsearch.test.cluster.local.LocalClusterSpec.LocalNodeSpec; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.Version; @@ -121,6 +122,22 @@ interface LocalSpecBuilder> { */ T systemProperty(String property, String value); + /** + * Adds a system property to node JVM arguments computed by the given supplier + */ + T systemProperty(String property, Supplier supplier); + + /** + * Adds a system property to node JVM arguments computed by the given supplier + * when the given predicate evaluates to {@code true}. + */ + T systemProperty(String setting, Supplier value, Predicate predicate); + + /** + * Register a {@link SystemPropertyProvider}. + */ + T systemProperty(SystemPropertyProvider systemPropertyProvider); + /** * Adds an additional command line argument to node JVM arguments. 
*/ diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml index 70b6904bc4a28..0030040b572c9 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml @@ -55,6 +55,10 @@ setup: --- "Test traces-apm-* data stream indexing": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102360" + - do: index: index: traces-apm-testing diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 9ceac97bce384..5c7795ecbd18f 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -1445,7 +1445,6 @@ public void testCancelViaAsyncSearchDelete() throws Exception { assertTrue(statusResponse.isPartial()); assertTrue(statusResponse.isRunning()); assertThat(statusResponse.getClusters().getTotal(), equalTo(2)); - assertThat(statusResponse.getFailedShards(), equalTo(0)); assertNull(statusResponse.getCompletionStatus()); } finally { SearchListenerPlugin.allowQueryPhase(); diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index b4830ca97938f..1766d8fe47820 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -14,6 +14,7 @@ public class BlobCacheMetrics { private final LongCounter cacheMissCounter; + private final LongCounter evictedCountNonZeroFrequency; private final LongHistogram cacheMissLoadTimes; public BlobCacheMetrics(MeterRegistry meterRegistry) { @@ -23,6 +24,11 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) { "The number of times there was a cache miss that triggered a read from the blob store", "count" ), + meterRegistry.registerLongCounter( + "elasticsearch.blob_cache.count_of_evicted_used_regions", + "The number of times a cache entry was evicted where the frequency was not zero", + "entries" + ), meterRegistry.registerLongHistogram( "elasticsearch.blob_cache.cache_miss_load_times", "The timing data for populating entries in the blob store resulting from a cache miss.", @@ -31,8 +37,9 @@ public BlobCacheMetrics(MeterRegistry meterRegistry) { ); } - BlobCacheMetrics(LongCounter cacheMissCounter, LongHistogram cacheMissLoadTimes) { + BlobCacheMetrics(LongCounter cacheMissCounter, LongCounter evictedCountNonZeroFrequency, LongHistogram cacheMissLoadTimes) { this.cacheMissCounter = cacheMissCounter; + this.evictedCountNonZeroFrequency = evictedCountNonZeroFrequency; this.cacheMissLoadTimes = cacheMissLoadTimes; } @@ -42,6 +49,10 @@ public LongCounter getCacheMissCounter() { return cacheMissCounter; } + public LongCounter getEvictedCountNonZeroFrequency() { + return evictedCountNonZeroFrequency; + } + public LongHistogram getCacheMissLoadTimes() { return cacheMissLoadTimes; } diff --git 
a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index e0e5779993044..8f3dc0c8ef52a 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -1057,18 +1057,24 @@ public int forceEvict(Predicate cacheKeyPredicate) { } }); var evictedCount = 0; + var nonZeroFrequencyEvictedCount = 0; if (matchingEntries.isEmpty() == false) { synchronized (SharedBlobCacheService.this) { for (LFUCacheEntry entry : matchingEntries) { + int frequency = entry.freq; boolean evicted = entry.chunk.forceEvict(); if (evicted && entry.chunk.io != null) { unlink(entry); keyMapping.remove(entry.chunk.regionKey, entry); evictedCount++; + if (frequency > 0) { + nonZeroFrequencyEvictedCount++; + } } } } } + blobCacheMetrics.getEvictedCountNonZeroFrequency().incrementBy(nonZeroFrequencyEvictedCount); return evictedCount; } @@ -1088,8 +1094,12 @@ private LFUCacheEntry initChunk(LFUCacheEntry entry) { assignToSlot(entry, freeSlot); } else { // need to evict something + int frequency; synchronized (SharedBlobCacheService.this) { - maybeEvict(); + frequency = maybeEvict(); + } + if (frequency > 0) { + blobCacheMetrics.getEvictedCountNonZeroFrequency().increment(); } final SharedBytes.IO freeSlotRetry = freeRegions.poll(); if (freeSlotRetry != null) { @@ -1221,18 +1231,25 @@ private void unlink(final LFUCacheEntry entry) { assert invariant(entry, false); } - private void maybeEvict() { + /** + * Cycles through the {@link LFUCacheEntry} from 0 to max frequency and + * tries to evict a chunk if no one is holding onto its resources anymore + * + * @return the frequency of the evicted entry as integer or -1 if no entry was evicted from cache + */ + private int maybeEvict() { assert Thread.holdsLock(SharedBlobCacheService.this); - for (int i = 0; i < maxFreq; i++) { - for (LFUCacheEntry entry = freqs[i]; entry != null; entry = entry.next) { + for (int currentFreq = 0; currentFreq < maxFreq; currentFreq++) { + for (LFUCacheEntry entry = freqs[currentFreq]; entry != null; entry = entry.next) { boolean evicted = entry.chunk.tryEvict(); if (evicted && entry.chunk.io != null) { unlink(entry); keyMapping.remove(entry.chunk.regionKey, entry); - return; + return currentFreq; } } } + return -1; } private void computeDecay() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java index 698938ee2f78f..ebfaae72b9df2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/jwt/JwtAuthenticationToken.java @@ -44,6 +44,7 @@ public static JwtAuthenticationToken tryParseJwt(SecureString userCredentials, @ * See also {@code JwtRealm#authenticate}. * @param clientAuthenticationSharedSecret URL-safe Shared Secret for Client authentication. Required by some JWT realms. 
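To make the maybeEvict() contract in this hunk concrete: the scan walks frequency buckets from coldest (0) upwards, evicts the first entry whose resources are free, and reports that entry's frequency so callers can count evictions of still-used (non-zero frequency) regions. A toy sketch under those assumptions, with plain collections standing in for the intrusive LFU lists that the real code walks under the cache lock:

import java.util.Deque;
import java.util.List;

class MaybeEvictSketch {
    // Returns the frequency bucket of the evicted entry, or -1 if nothing
    // could be evicted; callers treat freq > 0 as eviction of a used region.
    static int maybeEvict(List<Deque<String>> freqBuckets) {
        for (int freq = 0; freq < freqBuckets.size(); freq++) {
            if (freqBuckets.get(freq).pollFirst() != null) {
                return freq;
            }
        }
        return -1;
    }
}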
*/ + @SuppressWarnings("this-escape") public JwtAuthenticationToken( SignedJWT signedJWT, byte[] userCredentialsHash, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json index e6efc1ea5a11b..706b582f5c3af 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash-mb.json @@ -523,6 +523,9 @@ "name": { "type": "keyword", "ignore_above": 1024 + }, + "ip": { + "type": "ip" } } }, diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java index 7836f798af70f..57132116c7010 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java @@ -192,6 +192,7 @@ public Collection createComponents(PluginServices services) { // Behavioral analytics components final AnalyticsTemplateRegistry analyticsTemplateRegistry = new AnalyticsTemplateRegistry( services.clusterService(), + services.featureService(), services.threadPool(), services.client(), services.xContentRegistry() diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java index 3afdeb4897992..81e072479d402 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearchFeatures.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.features.FeatureSpecification; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import java.util.Map; @@ -17,6 +18,11 @@ public class EnterpriseSearchFeatures implements FeatureSpecification { @Override public Map getHistoricalFeatures() { - return Map.of(ConnectorTemplateRegistry.CONNECTOR_TEMPLATES_FEATURE, Version.V_8_10_0); + return Map.of( + ConnectorTemplateRegistry.CONNECTOR_TEMPLATES_FEATURE, + Version.V_8_10_0, + AnalyticsTemplateRegistry.ANALYTICS_TEMPLATE_FEATURE, + Version.V_8_12_0 + ); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java index 7472063e92e11..a1446606a21af 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java @@ -6,13 +6,14 @@ */ package org.elasticsearch.xpack.application.analytics; -import org.elasticsearch.Version; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.features.FeatureService; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -35,8 +36,7 @@ public class AnalyticsTemplateRegistry extends IndexTemplateRegistry { - // This registry requires all nodes to be at least 8.12.0 - static final Version MIN_NODE_VERSION = Version.V_8_12_0; + public static final NodeFeature ANALYTICS_TEMPLATE_FEATURE = new NodeFeature("behavioral_analytics.templates"); // This number must be incremented when we make changes to built-in templates. static final int REGISTRY_VERSION = 3; @@ -103,13 +103,17 @@ protected List getIngestPipelines() { ) ); + private final FeatureService featureService; + public AnalyticsTemplateRegistry( ClusterService clusterService, + FeatureService featureService, ThreadPool threadPool, Client client, NamedXContentRegistry xContentRegistry ) { super(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); + this.featureService = featureService; } @Override @@ -140,8 +144,6 @@ protected boolean requiresMasterNode() { @Override protected boolean isClusterReady(ClusterChangedEvent event) { - // Ensure templates are installed only once all nodes are updated to 8.8.0. - Version minNodeVersion = event.state().nodes().getMinNodeVersion(); - return minNodeVersion.onOrAfter(MIN_NODE_VERSION); + return featureService.clusterHasFeature(event.state(), ANALYTICS_TEMPLATE_FEATURE); } } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java index d1e0e23ee3230..8bf06b8954080 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; +import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.test.ClusterServiceUtils; @@ -41,6 +42,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.application.EnterpriseSearchFeatures; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; @@ -75,7 +77,13 @@ public void createRegistryAndClient() { threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - registry = new AnalyticsTemplateRegistry(clusterService, threadPool, client, NamedXContentRegistry.EMPTY); + registry = new AnalyticsTemplateRegistry( + clusterService, + new FeatureService(List.of(new EnterpriseSearchFeatures())), + threadPool, + client, + NamedXContentRegistry.EMPTY + ); } @After diff 
--git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java index cc6a2d4e41104..b7b2844b9be26 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java @@ -30,14 +30,12 @@ public class MultivalueDedupeBytesRef { * The choice of number has been experimentally derived. */ private static final int ALWAYS_COPY_MISSING = 20; // TODO BytesRef should try adding to the hash *first* and then comparing. - private final Block.Ref ref; private final BytesRefBlock block; private BytesRef[] work = new BytesRef[ArrayUtil.oversize(2, org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; private int w; - public MultivalueDedupeBytesRef(Block.Ref ref) { - this.ref = ref; - this.block = (BytesRefBlock) ref.block(); + public MultivalueDedupeBytesRef(BytesRefBlock block) { + this.block = block; // TODO very large numbers might want a hash based implementation - and for BytesRef that might not be that big fillWork(0, work.length); } @@ -46,11 +44,12 @@ public MultivalueDedupeBytesRef(Block.Ref ref) { * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public BytesRefBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -85,7 +84,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -95,11 +94,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. */ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public BytesRefBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -112,7 +112,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -124,11 +124,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. 
*/ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public BytesRefBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -141,7 +142,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java index d9de26a36d830..74333fa6909d9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java @@ -29,25 +29,24 @@ public class MultivalueDedupeDouble { * The choice of number has been experimentally derived. */ private static final int ALWAYS_COPY_MISSING = 110; - private final Block.Ref ref; private final DoubleBlock block; private double[] work = new double[ArrayUtil.oversize(2, Double.BYTES)]; private int w; - public MultivalueDedupeDouble(Block.Ref ref) { - this.ref = ref; - this.block = (DoubleBlock) ref.block(); + public MultivalueDedupeDouble(DoubleBlock block) { + this.block = block; } /** * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public DoubleBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -82,7 +81,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -92,11 +91,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
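 * <p>As the name suggests, this variant copies each position's values and
 * sorts them, so the deduplicated values come back in ascending order. A
 * small sketch under that assumption (names illustrative):
 * <pre>{@code
 * try (DoubleBlock deduped = new MultivalueDedupeDouble(block).dedupeToBlockUsingCopyAndSort(blockFactory)) {
 *     // values within each position are unique and ascending
 * }
 * }</pre>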
*/ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public DoubleBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -109,7 +109,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -121,11 +121,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public DoubleBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -138,7 +139,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java index aad15dde6aec9..000e30c51d9aa 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java @@ -28,25 +28,24 @@ public class MultivalueDedupeInt { * The choice of number has been experimentally derived. */ private static final int ALWAYS_COPY_MISSING = 300; - private final Block.Ref ref; private final IntBlock block; private int[] work = new int[ArrayUtil.oversize(2, Integer.BYTES)]; private int w; - public MultivalueDedupeInt(Block.Ref ref) { - this.ref = ref; - this.block = (IntBlock) ref.block(); + public MultivalueDedupeInt(IntBlock block) { + this.block = block; } /** * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. 
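 * <p>This is the variant the other javadocs say to prefer. A minimal sketch of
 * the post-change calling convention, which passes the block directly instead
 * of wrapping it in a {@code Block.Ref} (names illustrative):
 * <pre>{@code
 * try (IntBlock deduped = new MultivalueDedupeInt(block).dedupeToBlockAdaptive(blockFactory)) {
 *     // each position now holds only unique values
 * }
 * }</pre>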
*/ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public IntBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -81,7 +80,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -91,11 +90,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. */ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public IntBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -108,7 +108,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -120,11 +120,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. 
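 * <p>Note that when the input reports {@code mvDeduplicated()} this method
 * returns the input block itself after {@code incRef()}, so the result may be
 * reference-equal to the input; callers that compare blocks by identity (as
 * the dedupe tests in this change do) should account for that.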
*/ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public IntBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -137,7 +138,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java index 98f79f3989c27..a981b397a123d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java @@ -30,25 +30,24 @@ public class MultivalueDedupeLong { */ private static final int ALWAYS_COPY_MISSING = 300; - private final Block.Ref ref; private final LongBlock block; private long[] work = new long[ArrayUtil.oversize(2, Long.BYTES)]; private int w; - public MultivalueDedupeLong(Block.Ref ref) { - this.ref = ref; - this.block = (LongBlock) ref.block(); + public MultivalueDedupeLong(LongBlock block) { + this.block = block; } /** * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public LongBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -83,7 +82,7 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -93,11 +92,12 @@ public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
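 * <p>Same shape as the other generated variants; a sketch assuming the
 * non-breaking factory used elsewhere in this change:
 * <pre>{@code
 * LongBlock deduped = new MultivalueDedupeLong(block).dedupeToBlockUsingCopyAndSort(BlockFactory.getNonBreakingInstance());
 * deduped.close(); // caller always owns the returned reference
 * }</pre>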
*/ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public LongBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -110,7 +110,7 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -122,11 +122,12 @@ public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public LongBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -139,7 +140,7 @@ public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java index 6f041a6681659..caa16bc263005 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.IntBlock; @@ -70,7 +69,7 @@ private IntVector add(BooleanVector vector) { } private IntBlock add(BooleanBlock block) { - return new MultivalueDedupeBoolean(Block.Ref.floating(block)).hash(everSeen); + return new MultivalueDedupeBoolean(block).hash(everSeen); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java index 2f1bb4f858ff4..579b114ae2609 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java @@ -87,7 +87,7 @@ private IntVector add(BytesRefVector vector) { private IntBlock add(BytesRefBlock block) { // TODO: use block 
factory - MultivalueDedupe.HashResult result = new MultivalueDedupeBytesRef(Block.Ref.floating(block)).hash(bytesRefHash); + MultivalueDedupe.HashResult result = new MultivalueDedupeBytesRef(block).hash(bytesRefHash); seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java index a8a67180775fb..468180430424c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/DoubleBlockHash.java @@ -80,7 +80,7 @@ private IntVector add(DoubleVector vector) { } private IntBlock add(DoubleBlock block) { - MultivalueDedupe.HashResult result = new MultivalueDedupeDouble(Block.Ref.floating(block)).hash(longHash); // TODO: block factory + MultivalueDedupe.HashResult result = new MultivalueDedupeDouble(block).hash(longHash); // TODO: block factory seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java index 79e03e4dc0ed5..41d6bf1db8681 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/IntBlockHash.java @@ -77,7 +77,7 @@ private IntVector add(IntVector vector) { } private IntBlock add(IntBlock block) { - MultivalueDedupe.HashResult result = new MultivalueDedupeInt(Block.Ref.floating(block)).hash(longHash); // TODO: block factory + MultivalueDedupe.HashResult result = new MultivalueDedupeInt(block).hash(longHash); // TODO: block factory seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java index c736cfae65ee7..5ee273683d71c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongBlockHash.java @@ -80,7 +80,7 @@ private IntVector add(LongVector vector) { } private IntBlock add(LongBlock block) { - MultivalueDedupe.HashResult result = new MultivalueDedupeLong(Block.Ref.floating(block)).hash(longHash); // TODO: block factory + MultivalueDedupe.HashResult result = new MultivalueDedupeLong(block).hash(longHash); // TODO: block factory seenNull |= result.sawNull(); return result.ords(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index 8d7a9df523c3d..b1e2874614951 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; import 
org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.BlockLoaderStoredFieldsFromLeafLoader; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.search.fetch.StoredFieldsSpec; import org.elasticsearch.xcontent.XContentBuilder; @@ -42,6 +43,7 @@ import java.util.Map; import java.util.Objects; import java.util.TreeMap; +import java.util.function.Supplier; import java.util.stream.Collectors; /** @@ -52,12 +54,13 @@ public class ValuesSourceReaderOperator extends AbstractPageMappingOperator { /** * Creates a factory for {@link ValuesSourceReaderOperator}. * @param fields fields to load + * @param shardContexts per-shard loading information * @param docChannel the channel containing the shard, leaf/segment and doc id */ - public record Factory(List fields, List readers, int docChannel) implements OperatorFactory { + public record Factory(List fields, List shardContexts, int docChannel) implements OperatorFactory { @Override public Operator get(DriverContext driverContext) { - return new ValuesSourceReaderOperator(driverContext.blockFactory(), fields, readers, docChannel); + return new ValuesSourceReaderOperator(driverContext.blockFactory(), fields, shardContexts, docChannel); } @Override @@ -66,8 +69,10 @@ public String describe() { } } + public record ShardContext(IndexReader reader, Supplier newSourceLoader) {} + private final List fields; - private final List readers; + private final List shardContexts; private final int docChannel; private final ComputeBlockLoaderFactory blockFactory; @@ -86,9 +91,9 @@ public record FieldInfo(String name, List blockLoaders) {} * @param fields fields to load * @param docChannel the channel containing the shard, leaf/segment and doc id */ - public ValuesSourceReaderOperator(BlockFactory blockFactory, List fields, List readers, int docChannel) { + public ValuesSourceReaderOperator(BlockFactory blockFactory, List fields, List shardContexts, int docChannel) { this.fields = fields.stream().map(f -> new FieldWork(f)).toList(); - this.readers = readers; + this.shardContexts = shardContexts; this.docChannel = docChannel; this.blockFactory = new ComputeBlockLoaderFactory(blockFactory); } @@ -161,10 +166,11 @@ public int get(int i) { "found row stride readers [" + rowStrideReaders + "] without stored fields [" + storedFieldsSpec + "]" ); } + LeafReaderContext ctx = ctx(shard, segment); BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( // TODO enable the optimization by passing non-null to docs if correct - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx(shard, segment), null), - storedFieldsSpec.requiresSource() + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? 
shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null ); trackStoredFields(storedFieldsSpec); // TODO when optimization is enabled add it to tracking for (int p = 0; p < docs.getPositionCount(); p++) { @@ -209,9 +215,10 @@ private void loadFromManyLeaves(Block[] blocks, DocVector docVector) throws IOEx lastShard = shard; lastSegment = segment; StoredFieldsSpec storedFieldsSpec = storedFieldsSpecForShard(shard); + LeafReaderContext ctx = ctx(shard, segment); storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx(shard, segment), null), - storedFieldsSpec.requiresSource() + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null ); if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { trackStoredFields(storedFieldsSpec); @@ -328,7 +335,7 @@ public void close() { } private LeafReaderContext ctx(int shard, int segment) { - return readers.get(shard).leaves().get(segment); + return shardContexts.get(shard).reader.leaves().get(segment); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java index ea4f9dc1e05a6..c0ede17588016 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupe.java @@ -28,13 +28,13 @@ public final class MultivalueDedupe { * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public static Block.Ref dedupeToBlockAdaptive(Block.Ref ref, BlockFactory blockFactory) { - return switch (ref.block().elementType()) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).dedupeToBlockAdaptive(blockFactory); - case INT -> new MultivalueDedupeInt(ref).dedupeToBlockAdaptive(blockFactory); - case LONG -> new MultivalueDedupeLong(ref).dedupeToBlockAdaptive(blockFactory); - case DOUBLE -> new MultivalueDedupeDouble(ref).dedupeToBlockAdaptive(blockFactory); + public static Block dedupeToBlockAdaptive(Block block, BlockFactory blockFactory) { + return switch (block.elementType()) { + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockAdaptive(blockFactory); + case INT -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockAdaptive(blockFactory); + case LONG -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockAdaptive(blockFactory); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockAdaptive(blockFactory); default -> throw new IllegalArgumentException(); }; } @@ -45,13 +45,13 @@ public static Block.Ref dedupeToBlockAdaptive(Block.Ref ref, BlockFactory blockF * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
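 * <p>A sketch of the post-change convention: the old API took and returned a
 * {@code Block.Ref}, while this one takes the {@link Block} itself, dispatches
 * on its element type, and leaves the caller to release the result (names
 * illustrative):
 * <pre>{@code
 * Block deduped = MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, blockFactory);
 * try {
 *     // use the deduplicated block
 * } finally {
 *     deduped.close();
 * }
 * }</pre>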
*/ - public static Block.Ref dedupeToBlockUsingCopyMissing(Block.Ref ref, BlockFactory blockFactory) { - return switch (ref.block().elementType()) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).dedupeToBlockUsingCopyMissing(blockFactory); - case INT -> new MultivalueDedupeInt(ref).dedupeToBlockUsingCopyMissing(blockFactory); - case LONG -> new MultivalueDedupeLong(ref).dedupeToBlockUsingCopyMissing(blockFactory); - case DOUBLE -> new MultivalueDedupeDouble(ref).dedupeToBlockUsingCopyMissing(blockFactory); + public static Block dedupeToBlockUsingCopyMissing(Block block, BlockFactory blockFactory) { + return switch (block.elementType()) { + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); + case INT -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); + case LONG -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockUsingCopyMissing(blockFactory); default -> throw new IllegalArgumentException(); }; } @@ -64,13 +64,13 @@ public static Block.Ref dedupeToBlockUsingCopyMissing(Block.Ref ref, BlockFactor * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public static Block.Ref dedupeToBlockUsingCopyAndSort(Block.Ref ref, BlockFactory blockFactory) { - return switch (ref.block().elementType()) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).dedupeToBlockUsingCopyAndSort(blockFactory); - case INT -> new MultivalueDedupeInt(ref).dedupeToBlockUsingCopyAndSort(blockFactory); - case LONG -> new MultivalueDedupeLong(ref).dedupeToBlockUsingCopyAndSort(blockFactory); - case DOUBLE -> new MultivalueDedupeDouble(ref).dedupeToBlockUsingCopyAndSort(blockFactory); + public static Block dedupeToBlockUsingCopyAndSort(Block block, BlockFactory blockFactory) { + return switch (block.elementType()) { + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).dedupeToBlock(blockFactory); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); + case INT -> new MultivalueDedupeInt((IntBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); + case LONG -> new MultivalueDedupeLong((LongBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).dedupeToBlockUsingCopyAndSort(blockFactory); default -> throw new IllegalArgumentException(); }; } @@ -81,26 +81,33 @@ public static Block.Ref dedupeToBlockUsingCopyAndSort(Block.Ref ref, BlockFactor */ public static ExpressionEvaluator.Factory evaluator(ElementType elementType, ExpressionEvaluator.Factory field) { return switch (elementType) { - case BOOLEAN -> new EvaluatorFactory( - field, - (blockFactory, ref) -> new MultivalueDedupeBoolean(ref).dedupeToBlock(blockFactory) - ); - case BYTES_REF -> new EvaluatorFactory( - field, - (blockFactory, ref) -> new MultivalueDedupeBytesRef(ref).dedupeToBlockAdaptive(blockFactory) - ); - case INT -> new EvaluatorFactory( - field, - (blockFactory, ref) -> new MultivalueDedupeInt(ref).dedupeToBlockAdaptive(blockFactory) - ); - case 
LONG -> new EvaluatorFactory( - field, - (blockFactory, ref) -> new MultivalueDedupeLong(ref).dedupeToBlockAdaptive(blockFactory) - ); - case DOUBLE -> new EvaluatorFactory( - field, - (blockFactory, ref) -> new MultivalueDedupeDouble(ref).dedupeToBlockAdaptive(blockFactory) - ); + case BOOLEAN -> new EvaluatorFactory(field, (blockFactory, ref) -> { + try (ref) { + return Block.Ref.floating(new MultivalueDedupeBoolean((BooleanBlock) ref.block()).dedupeToBlock(blockFactory)); + } + }); + case BYTES_REF -> new EvaluatorFactory(field, (blockFactory, ref) -> { + try (ref) { + return Block.Ref.floating( + new MultivalueDedupeBytesRef((BytesRefBlock) ref.block()).dedupeToBlockAdaptive(blockFactory) + ); + } + }); + case INT -> new EvaluatorFactory(field, (blockFactory, ref) -> { + try (ref) { + return Block.Ref.floating(new MultivalueDedupeInt((IntBlock) ref.block()).dedupeToBlockAdaptive(blockFactory)); + } + }); + case LONG -> new EvaluatorFactory(field, (blockFactory, ref) -> { + try (ref) { + return Block.Ref.floating(new MultivalueDedupeLong((LongBlock) ref.block()).dedupeToBlockAdaptive(blockFactory)); + } + }); + case DOUBLE -> new EvaluatorFactory(field, (blockFactory, ref) -> { + try (ref) { + return Block.Ref.floating(new MultivalueDedupeDouble((DoubleBlock) ref.block()).dedupeToBlockAdaptive(blockFactory)); + } + }); case NULL -> field; // The page is all nulls and when you dedupe that it's still all nulls default -> throw new IllegalArgumentException("unsupported type [" + elementType + "]"); }; @@ -121,8 +128,8 @@ public static BatchEncoder batchEncoder(Block.Ref ref, int batchSize, boolean al return new BatchEncoder.DirectNulls(ref.block()); } var elementType = ref.block().elementType(); + var block = ref.block(); if (allowDirectEncoder && ref.block().mvDeduplicated()) { - var block = ref.block(); return switch (elementType) { case BOOLEAN -> new BatchEncoder.DirectBooleans((BooleanBlock) block); case BYTES_REF -> new BatchEncoder.DirectBytesRefs((BytesRefBlock) block); @@ -133,11 +140,11 @@ public static BatchEncoder batchEncoder(Block.Ref ref, int batchSize, boolean al }; } else { return switch (elementType) { - case BOOLEAN -> new MultivalueDedupeBoolean(ref).batchEncoder(batchSize); - case BYTES_REF -> new MultivalueDedupeBytesRef(ref).batchEncoder(batchSize); - case INT -> new MultivalueDedupeInt(ref).batchEncoder(batchSize); - case LONG -> new MultivalueDedupeLong(ref).batchEncoder(batchSize); - case DOUBLE -> new MultivalueDedupeDouble(ref).batchEncoder(batchSize); + case BOOLEAN -> new MultivalueDedupeBoolean((BooleanBlock) block).batchEncoder(batchSize); + case BYTES_REF -> new MultivalueDedupeBytesRef((BytesRefBlock) block).batchEncoder(batchSize); + case INT -> new MultivalueDedupeInt((IntBlock) block).batchEncoder(batchSize); + case LONG -> new MultivalueDedupeLong((LongBlock) block).batchEncoder(batchSize); + case DOUBLE -> new MultivalueDedupeDouble((DoubleBlock) block).batchEncoder(batchSize); default -> throw new IllegalArgumentException(); }; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java index 4170b4727df2c..5c745640e4790 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java @@ -8,7 +8,6 @@ package 
org.elasticsearch.compute.operator; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntBlock; @@ -31,24 +30,23 @@ public class MultivalueDedupeBoolean { */ public static final int TRUE_ORD = 2; - private final Block.Ref ref; private final BooleanBlock block; private boolean seenTrue; private boolean seenFalse; - public MultivalueDedupeBoolean(Block.Ref ref) { - this.ref = ref; - this.block = (BooleanBlock) ref.block(); + public MultivalueDedupeBoolean(BooleanBlock block) { + this.block = block; } /** * Dedupe values using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlock(BlockFactory blockFactory) { + public BooleanBlock dedupeToBlock(BlockFactory blockFactory) { if (false == block.mayHaveMultivaluedFields()) { - return ref; + block.incRef(); + return block; } - try (ref; BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -61,7 +59,7 @@ public Block.Ref dedupeToBlock(BlockFactory blockFactory) { } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 2e1cbf9a1135d..4fb90ddb57e25 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.operator; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -54,7 +53,7 @@ public class OrdinalsGroupingOperator implements Operator { public record OrdinalsGroupingOperatorFactory( List blockLoaders, - List readers, + List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, @@ -67,7 +66,7 @@ public record OrdinalsGroupingOperatorFactory( public Operator get(DriverContext driverContext) { return new OrdinalsGroupingOperator( blockLoaders, - readers, + shardContexts, groupingElementType, docChannel, groupingField, @@ -85,7 +84,7 @@ public String describe() { } private final List blockLoaders; - private final List readers; + private final List shardContexts; private final int docChannel; private final String groupingField; @@ -104,7 +103,7 @@ public String describe() { public OrdinalsGroupingOperator( List blockLoaders, - List readers, + List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, @@ -115,7 +114,7 @@ public OrdinalsGroupingOperator( ) { Objects.requireNonNull(aggregatorFactories); this.blockLoaders = blockLoaders; - this.readers = readers; + this.shardContexts = shardContexts; this.groupingElementType = groupingElementType; this.docChannel = docChannel; this.groupingField = groupingField; @@ -150,7 +149,7 @@ public void 
addInput(Page page) { return new OrdinalSegmentAggregator( driverContext.blockFactory(), this::createGroupingAggregators, - () -> blockLoader.ordinals(readers.get(k.shardIndex).leaves().get(k.segmentIndex)), + () -> blockLoader.ordinals(shardContexts.get(k.shardIndex).reader().leaves().get(k.segmentIndex)), bigArrays ); } catch (IOException e) { @@ -165,7 +164,7 @@ public void addInput(Page page) { int channelIndex = page.getBlockCount(); // extractor will append a new block at the end valuesAggregator = new ValuesAggregator( blockLoaders, - readers, + shardContexts, groupingElementType, docChannel, groupingField, @@ -466,7 +465,7 @@ private static class ValuesAggregator implements Releasable { ValuesAggregator( List blockLoaders, - List readers, + List shardContexts, ElementType groupingElementType, int docChannel, String groupingField, @@ -478,7 +477,7 @@ private static class ValuesAggregator implements Releasable { this.extractor = new ValuesSourceReaderOperator( BlockFactory.getNonBreakingInstance(), List.of(new ValuesSourceReaderOperator.FieldInfo(groupingField, blockLoaders)), - readers, + shardContexts, docChannel ); this.aggregator = new HashAggregationOperator( diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st index 21fdb257845d5..04c3a47384537 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st @@ -56,14 +56,12 @@ $elseif(long)$ private static final int ALWAYS_COPY_MISSING = 300; $endif$ - private final Block.Ref ref; private final $Type$Block block; private $type$[] work = new $type$[ArrayUtil.oversize(2, $BYTES$)]; private int w; - public MultivalueDedupe$Type$(Block.Ref ref) { - this.ref = ref; - this.block = ($Type$Block) ref.block(); + public MultivalueDedupe$Type$($Type$Block block) { + this.block = block; $if(BytesRef)$ // TODO very large numbers might want a hash based implementation - and for BytesRef that might not be that big fillWork(0, work.length); @@ -74,11 +72,12 @@ $endif$ * Remove duplicate values from each position and write the results to a * {@link Block} using an adaptive algorithm based on the size of the input list. */ - public Block.Ref dedupeToBlockAdaptive(BlockFactory blockFactory) { + public $Type$Block dedupeToBlockAdaptive(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; $Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -117,7 +116,7 @@ $endif$ } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -127,11 +126,12 @@ $endif$ * case complexity for larger. Prefer {@link #dedupeToBlockAdaptive} * which picks based on the number of elements at each position. 
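 * <p>In the generated classes this expands once per element type; a sketch of
 * the expanded call shape, with the {@code $Type$} placeholders resolved by
 * the code generator:
 * <pre>{@code
 * $Type$Block deduped = new MultivalueDedupe$Type$(block).dedupeToBlockUsingCopyAndSort(blockFactory);
 * deduped.close();
 * }</pre>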
*/ - public Block.Ref dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { + public $Type$Block dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; $Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -148,7 +148,7 @@ $endif$ } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } @@ -160,11 +160,12 @@ $endif$ * performance is dominated by the {@code n*log n} sort. Prefer * {@link #dedupeToBlockAdaptive} unless you need the results sorted. */ - public Block.Ref dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { + public $Type$Block dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { if (block.mvDeduplicated()) { - return ref; + block.incRef(); + return block; } - try (ref; $Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -181,7 +182,7 @@ $endif$ } } } - return Block.Ref.floating(builder.build()); + return builder.build(); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index d9730d3f602c7..bfa252ded0420 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -48,6 +48,7 @@ import org.elasticsearch.compute.lucene.DataPartitioning; import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; +import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -62,6 +63,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.SearchContext; @@ -230,7 +232,7 @@ public String toString() { }, new OrdinalsGroupingOperator( List.of(new KeywordFieldMapper.KeywordFieldType("g").blockLoader(null)), - List.of(reader), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), ElementType.BYTES_REF, 0, gField, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java index ff231a0cc20e0..016d74aa6c299 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilderTests.java @@ -122,6 +122,37 @@ protected DriverContext breakingDriverContext() { // TODO move this to driverCon return new DriverContext(bigArrays, factory); } + public void testAllNull() throws IOException { + BlockFactory factory = breakingDriverContext().blockFactory(); + int count = 1000; + try (Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < count; i++) { + for (BytesRef v : new BytesRef[] { new BytesRef("a"), new BytesRef("b"), new BytesRef("c"), new BytesRef("d") }) { + indexWriter.addDocument(List.of(new SortedDocValuesField("f", v))); + } + } + try (IndexReader reader = indexWriter.getReader()) { + for (LeafReaderContext ctx : reader.leaves()) { + SortedDocValues docValues = ctx.reader().getSortedDocValues("f"); + try (SingletonOrdinalsBuilder builder = new SingletonOrdinalsBuilder(factory, docValues, ctx.reader().numDocs())) { + for (int i = 0; i < ctx.reader().maxDoc(); i++) { + if (ctx.reader().getLiveDocs() == null || ctx.reader().getLiveDocs().get(i)) { + assertThat(docValues.advanceExact(i), equalTo(true)); + builder.appendNull(); + } + } + try (BytesRefBlock built = builder.build()) { + for (int p = 0; p < built.getPositionCount(); p++) { + assertThat(built.isNull(p), equalTo(true)); + } + assertThat(built.areAllValuesNull(), equalTo(true)); + } + } + } + } + } + } + @After public void allBreakersEmpty() throws Exception { // first check that all big arrays are released, which can affect breakers diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 76810dbf2e3bc..d7dd26ab1122e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -61,6 +61,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; @@ -130,7 +131,7 @@ static Operator.OperatorFactory factory(IndexReader reader, MappedFieldType ft) static Operator.OperatorFactory factory(IndexReader reader, String name, BlockLoader loader) { return new ValuesSourceReaderOperator.Factory( List.of(new ValuesSourceReaderOperator.FieldInfo(name, List.of(loader))), - List.of(reader), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0 ); } @@ -337,7 +338,7 @@ private void loadSimpleAndAssert(DriverContext driverContext, List input, operators.add( new ValuesSourceReaderOperator.Factory( List.of(fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER))), - List.of(reader), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), 0 ).get(driverContext) ); @@ -347,7 +348,11 @@ private void loadSimpleAndAssert(DriverContext driverContext, List input, cases.removeAll(b); tests.addAll(b); operators.add( - new 
ValuesSourceReaderOperator.Factory(b.stream().map(i -> i.info).toList(), List.of(reader), 0).get(driverContext) + new ValuesSourceReaderOperator.Factory( + b.stream().map(i -> i.info).toList(), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) ); } List results = drive(operators, input.iterator(), driverContext); @@ -380,19 +385,34 @@ interface CheckReaders { void check(boolean forcedRowByRow, int pageCount, int segmentCount, Map readersBuilt); } - record FieldCase(ValuesSourceReaderOperator.FieldInfo info, CheckResults checkResults, CheckReaders checkReaders) { - FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReaders checkReaders) { + interface CheckReadersWithName { + void check(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readersBuilt); + } + + record FieldCase(ValuesSourceReaderOperator.FieldInfo info, CheckResults checkResults, CheckReadersWithName checkReaders) { + FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReadersWithName checkReaders) { this(fieldInfo(ft), checkResults, checkReaders); } + + FieldCase(MappedFieldType ft, CheckResults checkResults, CheckReaders checkReaders) { + this( + ft, + checkResults, + (name, forcedRowByRow, pageCount, segmentCount, readersBuilt) -> checkReaders.check( + forcedRowByRow, + pageCount, + segmentCount, + readersBuilt + ) + ); + } } /** * Asserts that {@link ValuesSourceReaderOperator#status} claims that only * the expected readers are built after loading singleton pages. */ - // @Repeat(iterations = 100) public void testLoadAllStatus() { - DriverContext driverContext = driverContext(); testLoadAllStatus(false); } @@ -400,7 +420,6 @@ public void testLoadAllStatus() { * Asserts that {@link ValuesSourceReaderOperator#status} claims that only * the expected readers are built after loading non-singleton pages. 
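 * (Here the canned input is first merged into a single page via
 * {@code CannedSourceOperator.mergePages}, so each column-at-a-time reader
 * should be built at most once per field.)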
*/ - // @Repeat(iterations = 100) public void testLoadAllStatusAllInOnePage() { testLoadAllStatus(true); } @@ -411,7 +430,13 @@ private void testLoadAllStatus(boolean allInOnePage) { List cases = infoAndChecksForEachType(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); // Build one operator for each field, so we get a unique map to assert on List operators = cases.stream() - .map(i -> new ValuesSourceReaderOperator.Factory(List.of(i.info), List.of(reader), 0).get(driverContext)) + .map( + i -> new ValuesSourceReaderOperator.Factory( + List.of(i.info), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) + ) .toList(); if (allInOnePage) { input = List.of(CannedSourceOperator.mergePages(input)); @@ -421,7 +446,7 @@ private void testLoadAllStatus(boolean allInOnePage) { ValuesSourceReaderOperator.Status status = (ValuesSourceReaderOperator.Status) operators.get(i).status(); assertThat(status.pagesProcessed(), equalTo(input.size())); FieldCase fc = cases.get(i); - fc.checkReaders.check(allInOnePage, input.size(), reader.leaves().size(), status.readersBuilt()); + fc.checkReaders.check(fc.info.name(), allInOnePage, input.size(), reader.leaves().size(), status.readersBuilt()); } } @@ -438,6 +463,13 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvLongsFromDocValues ) ); + r.add( + new FieldCase( + docValuesNumberField("missing_long", NumberFieldMapper.NumberType.LONG), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); r.add( new FieldCase(sourceNumberField("source_long", NumberFieldMapper.NumberType.LONG), checks::longs, StatusChecks::longsFromSource) ); @@ -458,6 +490,13 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvIntsFromDocValues ) ); + r.add( + new FieldCase( + docValuesNumberField("missing_int", NumberFieldMapper.NumberType.INTEGER), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); r.add( new FieldCase(sourceNumberField("source_int", NumberFieldMapper.NumberType.INTEGER), checks::ints, StatusChecks::intsFromSource) ); @@ -482,6 +521,13 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvShortsFromDocValues ) ); + r.add( + new FieldCase( + docValuesNumberField("missing_short", NumberFieldMapper.NumberType.SHORT), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); r.add( new FieldCase(docValuesNumberField("byte", NumberFieldMapper.NumberType.BYTE), checks::bytes, StatusChecks::bytesFromDocValues) ); @@ -492,6 +538,13 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvBytesFromDocValues ) ); + r.add( + new FieldCase( + docValuesNumberField("missing_byte", NumberFieldMapper.NumberType.BYTE), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); r.add( new FieldCase( docValuesNumberField("double", NumberFieldMapper.NumberType.DOUBLE), @@ -506,8 +559,16 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvDoublesFromDocValues ) ); + r.add( + new FieldCase( + docValuesNumberField("missing_double", NumberFieldMapper.NumberType.DOUBLE), + checks::constantNulls, + StatusChecks::constantNulls + ) + ); r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("bool"), checks::bools, StatusChecks::boolFromDocValues)); r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("mv_bool"), checks::mvBools, StatusChecks::mvBoolFromDocValues)); + r.add(new FieldCase(new 
BooleanFieldMapper.BooleanFieldType("missing_bool"), checks::constantNulls, StatusChecks::constantNulls)); r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::strings, StatusChecks::keywordsFromDocValues)); r.add( new FieldCase( @@ -516,6 +577,7 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvKeywordsFromDocValues ) ); + r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("missing_kwd"), checks::constantNulls, StatusChecks::constantNulls)); r.add(new FieldCase(storedKeywordField("stored_kwd"), checks::strings, StatusChecks::keywordsFromStored)); r.add(new FieldCase(storedKeywordField("mv_stored_kwd"), checks::mvStringsUnordered, StatusChecks::mvKeywordsFromStored)); r.add(new FieldCase(sourceKeywordField("source_kwd"), checks::strings, StatusChecks::keywordsFromSource)); @@ -544,6 +606,13 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd StatusChecks::mvTextWithDelegate ) ); + r.add( + new FieldCase( + textFieldWithDelegate("missing_text_with_delegate", new KeywordFieldMapper.KeywordFieldType("missing_kwd")), + checks::constantNulls, + StatusChecks::constantNullTextWithDelegate + ) + ); r.add(new FieldCase(new ProvidedIdFieldMapper(() -> false).fieldType(), checks::ids, StatusChecks::id)); r.add(new FieldCase(TsidExtractingIdFieldMapper.INSTANCE.fieldType(), checks::ids, StatusChecks::id)); r.add( @@ -876,6 +945,26 @@ static void mvTextWithDelegate(boolean forcedRowByRow, int pageCount, int segmen } } + static void constantNullTextWithDelegate(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + if (forcedRowByRow) { + assertMap( + readers, + matchesMap().entry( + "missing_text_with_delegate:row_stride:Delegating[to=missing_kwd, impl=constant_nulls]", + segmentCount + ) + ); + } else { + assertMap( + readers, + matchesMap().entry( + "missing_text_with_delegate:column_at_a_time:Delegating[to=missing_kwd, impl=constant_nulls]", + lessThanOrEqualTo(pageCount) + ) + ); + } + } + private static void docValues( String name, String type, @@ -959,22 +1048,19 @@ private static void stored(String name, String type, boolean forcedRowByRow, int ); } - static void constantBytes(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + static void constantBytes(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { if (forcedRowByRow) { - assertMap(readers, matchesMap().entry("constant_bytes:row_stride:constant[[66 6f 6f]]", segmentCount)); + assertMap(readers, matchesMap().entry(name + ":row_stride:constant[[66 6f 6f]]", segmentCount)); } else { - assertMap( - readers, - matchesMap().entry("constant_bytes:column_at_a_time:constant[[66 6f 6f]]", lessThanOrEqualTo(pageCount)) - ); + assertMap(readers, matchesMap().entry(name + ":column_at_a_time:constant[[66 6f 6f]]", lessThanOrEqualTo(pageCount))); } } - static void constantNulls(boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { + static void constantNulls(String name, boolean forcedRowByRow, int pageCount, int segmentCount, Map readers) { if (forcedRowByRow) { - assertMap(readers, matchesMap().entry("null:row_stride:constant_nulls", segmentCount)); + assertMap(readers, matchesMap().entry(name + ":row_stride:constant_nulls", segmentCount)); } else { - assertMap(readers, matchesMap().entry("null:column_at_a_time:constant_nulls", lessThanOrEqualTo(pageCount))); + assertMap(readers, matchesMap().entry(name + ":column_at_a_time:constant_nulls", lessThanOrEqualTo(pageCount))); 
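 // With the name parameter threaded through from FieldCase, this one check now
 // serves every "missing_*" field instead of hard-coding the "null" field name.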
} } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java index 517936478ea22..5b03dc294362f 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java @@ -22,8 +22,13 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; import org.junit.After; @@ -104,19 +109,19 @@ public MultivalueDedupeTests( public void testDedupeAdaptive() { BlockFactory blockFactory = blockFactory(); BasicBlockTests.RandomBlock b = randomBlock(); - assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockAdaptive(Block.Ref.floating(b.block()), blockFactory)); + assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockAdaptive(b.block(), blockFactory)); } public void testDedupeViaCopyAndSort() { BlockFactory blockFactory = blockFactory(); BasicBlockTests.RandomBlock b = randomBlock(); - assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyAndSort(Block.Ref.floating(b.block()), blockFactory)); + assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyAndSort(b.block(), blockFactory)); } public void testDedupeViaCopyMissing() { BlockFactory blockFactory = blockFactory(); BasicBlockTests.RandomBlock b = randomBlock(); - assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyMissing(Block.Ref.floating(b.block()), blockFactory)); + assertDeduped(blockFactory, b, MultivalueDedupe.dedupeToBlockUsingCopyMissing(b.block(), blockFactory)); } private BasicBlockTests.RandomBlock randomBlock() { @@ -131,8 +136,8 @@ private BasicBlockTests.RandomBlock randomBlock() { ); } - private void assertDeduped(BlockFactory blockFactory, BasicBlockTests.RandomBlock b, Block.Ref deduped) { - try (Block dedupedBlock = deduped.block()) { + private void assertDeduped(BlockFactory blockFactory, BasicBlockTests.RandomBlock b, Block dedupedBlock) { + try { if (dedupedBlock != b.block()) { assertThat(dedupedBlock.blockFactory(), sameInstance(blockFactory)); } @@ -143,6 +148,8 @@ private void assertDeduped(BlockFactory blockFactory, BasicBlockTests.RandomBloc : containsInAnyOrder(v.stream().collect(Collectors.toSet()).stream().sorted().toArray()); BlockTestUtils.assertPositionValues(dedupedBlock, p, matcher); } + } finally { + Releasables.closeExpectNoException(dedupedBlock); } } @@ -263,7 +270,7 @@ private void assertBooleanHash(Set previousValues, BasicBlockTests.Rand if (previousValues.contains(true)) { everSeen[2] = true; } - IntBlock hashes = new MultivalueDedupeBoolean(Block.Ref.floating(b.block())).hash(everSeen); + IntBlock hashes = new MultivalueDedupeBoolean((BooleanBlock) b.block()).hash(everSeen); List hashedValues = new ArrayList<>(); if (everSeen[1]) { hashedValues.add(false); @@ -277,7 +284,7 @@ private void assertBooleanHash(Set 
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec
index 884cadaeceb16..083ff90397623 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec
@@ -697,3 +697,52 @@ c:l| m:i | m1:i | c1:l| gender:s
 57 | 25945 | 25945 | 57 | M
 10 | 25324 | 25324 | 10 | null
 ;
+
+
+twoCountStarInStats#[skip:-8.11.99]
+row x = 1 | stats a = count(*), b = count(*) | stats c = count(*);
+
+c:long
+1
+;
+
+
+twoCountStarInStatsOnRealData-Ignore
+from employees | stats a = count(*), b = count(*) | stats c = count(*);
+
+c:long
+1
+;
+
+
+twoStatsSameExp#[skip:-8.11.99]
+row x = 1 | stats a = max(x), b = max(x) | stats c = max(a);
+
+c:integer
+1
+;
+
+
+twoCountStarByXInStats#[skip:-8.11.99]
+row x = 1, y = 2, z = 3 | stats a = count(*), b = count(*) by x | stats c = count(*);
+
+c:long
+1
+;
+
+
+twoCountStarPlusStatsBy#[skip:-8.11.99]
+row x = 1, y = 2, z = 3 | stats a = count(*), b = count(*) | stats c = count(*) by a;
+
+c:long | a:long
+1 | 1
+;
+
+
+twoCountStarByPlusStatsBy#[skip:-8.11.99]
+row x = 1, y = 2, z = 3 | stats a = count(*), b = count(*) by x | stats c = count(*) by a;
+
+c:long | a:long
+1 | 1
+;
+
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java
index bc648678984d5..12ec974142f62 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java
@@ -8,9 +8,11 @@
 package org.elasticsearch.xpack.esql.action;

 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BytesRefBlock;
@@ -24,6 +26,7 @@
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.versionfield.Version;

@@ -186,6 +189,17 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa
                 return builder.value(UnsupportedValueSource.UNSUPPORTED_OUTPUT);
             }
         };
+        case "_source" -> new PositionToXContent(block) {
+            @Override
+            protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex)
+                throws IOException {
+                BytesRef val = ((BytesRefBlock) block).getBytesRef(valueIndex, scratch);
+                try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) {
+                    parser.nextToken();
+                    return builder.copyCurrentStructure(parser);
+                }
+            }
+        };
         default -> throw new IllegalArgumentException("can't convert values of type [" + type + "]");
     };
 }
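The `_source` branch added above works because the column value is the original document's JSON held as raw bytes: rather than quoting it as an opaque string, the response re-parses it and splices the structure into the output. A condensed sketch of just that step, using the same calls the diff introduces (the wrapper method itself is illustrative):

```java
import java.io.IOException;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;

final class SourceRenderingSketch {
    // Copies the stored _source object into the response instead of emitting opaque bytes.
    static XContentBuilder render(XContentBuilder builder, BytesRef val) throws IOException {
        try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) {
            parser.nextToken(); // advance onto START_OBJECT before copying
            return builder.copyCurrentStructure(parser);
        }
    }
}
```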
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java
index a5194b1695c2c..3d91eafc8e033 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java
@@ -10,11 +10,14 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
 import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockStreamInput;
@@ -32,17 +35,22 @@
 import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.versionfield.Version;

 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.function.Function;

@@ -254,6 +262,17 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef
         case "boolean" -> ((BooleanBlock) block).getBoolean(offset);
         case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString();
         case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT;
+        case "_source" -> {
+            BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch);
+            try {
+                try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) {
+                    parser.nextToken();
+                    yield parser.mapOrdered();
+                }
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        }
         default -> throw EsqlIllegalArgumentException.illegalDataType(dataType);
     };
 }
@@ -287,6 +306,18 @@ private static Page valuesToPage(List dataTypes, List> valu
         case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value));
         case "null" -> builder.appendNull();
         case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef());
+        case "_source" -> {
+            @SuppressWarnings("unchecked")
+            Map o = (Map) value;
+            try {
+                try (XContentBuilder sourceBuilder = JsonXContent.contentBuilder()) {
+                    sourceBuilder.map(o);
+                    ((BytesRefBlock.Builder) builder).appendBytesRef(BytesReference.bytes(sourceBuilder).toBytesRef());
+                }
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        }
         default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c));
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java
index bad7dd00d6c18..6d50d41560938 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java
@@ -272,7 +272,14 @@ private void doLookup(
             fields.add(new ValuesSourceReaderOperator.FieldInfo(extractField.name(), loaders));
         }
         intermediateOperators.add(
-            new ValuesSourceReaderOperator(blockFactory, fields, List.of(searchContext.searcher().getIndexReader()), 0)
+            new ValuesSourceReaderOperator(
+                blockFactory,
+                fields,
+                List.of(new ValuesSourceReaderOperator.ShardContext(searchContext.searcher().getIndexReader(), () -> {
+                    throw new UnsupportedOperationException("can't load _source as part of enrich");
+                })),
+                0
+            )
        );

        // drop docs block
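The `ShardContext` plumbing above pairs each shard's `IndexReader` with a lazily created source loader; call sites that can never serve `_source` (benchmarks, enrich lookups) pass a supplier that throws. A minimal model of the pattern, with stand-in types for everything except the error message:

```java
import java.util.function.Supplier;

final class ShardContextSketch {
    interface SourceLoader {}

    // Reader type elided; the point is the lazy, possibly-unsupported loader.
    record ShardContext(Object indexReader, Supplier<SourceLoader> sourceLoader) {}

    static ShardContext withoutSource(Object reader) {
        return new ShardContext(reader, () -> {
            throw new UnsupportedOperationException("can't load _source here");
        });
    }
}
```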
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java
index 29b61949b6778..3ae19ceef4d08 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java
@@ -115,7 +115,6 @@ protected static List> rules() {
             "Operator Optimization",
             new CombineProjections(),
             new CombineEvals(),
-            new ReplaceDuplicateAggWithEval(),
             new PruneEmptyPlans(),
             new PropagateEmptyRelation(),
             new ConvertStringToByteRef(),
@@ -149,7 +148,13 @@ protected static List> rules() {
         );

         var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero());
-        var cleanup = new Batch<>("Clean Up", new ReplaceLimitAndSortAsTopN());
+        var cleanup = new Batch<>(
+            "Clean Up",
+            new ReplaceDuplicateAggWithEval(),
+            // pushing down limits again, because ReplaceDuplicateAggWithEval could create new Project nodes that can still be optimized
+            new PushDownAndCombineLimits(),
+            new ReplaceLimitAndSortAsTopN()
+        );
         var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN());
         var label = new Batch<>("Set as Optimized", Limiter.ONCE, new SetAsOptimized());
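The optimizer change above is an ordering fix: `ReplaceDuplicateAggWithEval` moves out of "Operator Optimization" into "Clean Up", and `PushDownAndCombineLimits` runs a second time because the rewrite can introduce new `Project` nodes with limits still sitting above them. A toy model of why a later batch may need to re-run an earlier rule (rule names match the diff; the plan representation and scaffolding are deliberately simplified):

```java
import java.util.List;
import java.util.function.UnaryOperator;

final class BatchOrderingSketch {
    record Batch(String name, List<UnaryOperator<String>> rules) {
        String apply(String plan) {
            for (UnaryOperator<String> rule : rules) {
                plan = rule.apply(plan);
            }
            return plan;
        }
    }

    public static void main(String[] args) {
        // Duplicate agg becomes agg + project; only then can the limit move past the new project.
        UnaryOperator<String> replaceDuplicateAggWithEval = p -> p.replace("agg+agg", "agg|project");
        UnaryOperator<String> pushDownAndCombineLimits = p -> p.replace("project|limit", "limit|project");

        Batch cleanup = new Batch("Clean Up", List.of(replaceDuplicateAggWithEval, pushDownAndCombineLimits));
        System.out.println(cleanup.apply("agg+agg|limit")); // agg|limit|project
    }
}
```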
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
index 966db6f02c9ba..f1647ff15d9d0 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
@@ -7,7 +7,6 @@
 package org.elasticsearch.xpack.esql.planner;

-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
@@ -64,7 +63,9 @@ public List searchContexts() {
     public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fieldExtractExec, PhysicalOperation source) {
         Layout.Builder layout = source.layout.builder();
         var sourceAttr = fieldExtractExec.sourceAttribute();
-        List readers = searchContexts.stream().map(s -> s.searcher().getIndexReader()).toList();
+        List readers = searchContexts.stream()
+            .map(s -> new ValuesSourceReaderOperator.ShardContext(s.searcher().getIndexReader(), s::newSourceLoader))
+            .toList();
         List fields = new ArrayList<>();
         int docChannel = source.layout.get(sourceAttr.id()).channel();
         for (Attribute attr : fieldExtractExec.attributesToExtract()) {
@@ -159,11 +160,14 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory(
     ) {
         var sourceAttribute = FieldExtractExec.extractSourceAttributesFrom(aggregateExec.child());
         int docChannel = source.layout.get(sourceAttribute.id()).channel();
+        List shardContexts = searchContexts.stream()
+            .map(s -> new ValuesSourceReaderOperator.ShardContext(s.searcher().getIndexReader(), s::newSourceLoader))
+            .toList();
         // The grouping-by values are ready, let's group on them directly.
         // Costin: why are they ready and not already exposed in the layout?
         return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory(
             BlockReaderFactories.loaders(searchContexts, attrSource.name(), EsqlDataTypes.isUnsupported(attrSource.dataType())),
-            searchContexts.stream().map(s -> s.searcher().getIndexReader()).toList(),
+            shardContexts,
             groupElementType,
             docChannel,
             attrSource.name(),
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
index 5acee778ad52c..53f12d949469d 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
@@ -290,6 +290,7 @@ public static ElementType toElementType(DataType dataType) {
         if (dataType == DataTypes.KEYWORD
             || dataType == DataTypes.TEXT
             || dataType == DataTypes.IP
+            || dataType == DataTypes.SOURCE
             || dataType == DataTypes.VERSION
             || dataType == DataTypes.UNSUPPORTED) {
             return ElementType.BYTES_REF;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java
index b8ba722b989ad..61a739c786dac 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java
@@ -34,6 +34,7 @@
 import static org.elasticsearch.xpack.ql.type.DataTypes.OBJECT;
 import static org.elasticsearch.xpack.ql.type.DataTypes.SCALED_FLOAT;
 import static org.elasticsearch.xpack.ql.type.DataTypes.SHORT;
+import static org.elasticsearch.xpack.ql.type.DataTypes.SOURCE;
 import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT;
 import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG;
 import static org.elasticsearch.xpack.ql.type.DataTypes.UNSUPPORTED;
@@ -64,6 +65,7 @@ public final class EsqlDataTypes {
         OBJECT,
         NESTED,
         SCALED_FLOAT,
+        SOURCE,
         VERSION,
         UNSIGNED_LONG
     ).sorted(Comparator.comparing(DataType::typeName)).toList();
@@ -158,6 +160,7 @@ public static boolean isRepresentable(DataType t) {
             && t != SHORT
             && t != FLOAT
             && t != SCALED_FLOAT
+            && t != SOURCE
             && t != HALF_FLOAT;
     }
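With `DataTypes.SOURCE` wired in above, `_source` travels through the compute engine in the same lane as other byte-backed types. An illustrative reduction of the `toElementType` mapping, assuming simplified string type names and a trimmed enum:

```java
enum ElementTypeSketch { BOOLEAN, BYTES_REF, DOUBLE, INT, LONG, NULL }

final class ToElementTypeSketch {
    static ElementTypeSketch toElementType(String typeName) {
        return switch (typeName) {
            // _source joins the BytesRef family: the engine only ever sees raw bytes.
            case "keyword", "text", "ip", "version", "_source", "unsupported" -> ElementTypeSketch.BYTES_REF;
            case "boolean" -> ElementTypeSketch.BOOLEAN;
            case "null" -> ElementTypeSketch.NULL;
            default -> throw new IllegalArgumentException("no element type for [" + typeName + "]");
        };
    }
}
```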
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java
index d71d0074c7ec0..3316f76c44680 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java
@@ -11,6 +11,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -32,6 +33,7 @@
 import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -40,6 +42,8 @@
 import org.junit.After;
 import org.junit.Before;

+import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.List;

@@ -112,6 +116,20 @@ private Page randomPage(List columns) {
             );
             case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef());
             case "null" -> builder.appendNull();
+            case "_source" -> {
+                try {
+                    ((BytesRefBlock.Builder) builder).appendBytesRef(
+                        BytesReference.bytes(
+                            JsonXContent.contentBuilder()
+                                .startObject()
+                                .field(randomAlphaOfLength(3), randomAlphaOfLength(10))
+                                .endObject()
+                        ).toBytesRef()
+                    );
+                } catch (IOException e) {
+                    throw new UncheckedIOException(e);
+                }
+            }
             default -> throw new UnsupportedOperationException("unsupported data type [" + c + "]");
         }
         return builder.build();
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
index 37fa17d3cb824..35a63d720eb1d 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java
@@ -12,6 +12,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.breaker.CircuitBreakingException;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.MockBigArrays;
@@ -30,6 +31,7 @@
 import org.elasticsearch.indices.CrankyCircuitBreakerService;
 import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.esql.evaluator.EvalMapper;
 import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest;
 import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
@@ -58,6 +60,7 @@
 import org.junit.runners.model.Statement;

 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.time.Duration;
@@ -115,6 +118,15 @@ public static Literal randomLiteral(DataType type) {
             case "text" -> new BytesRef(randomAlphaOfLength(50));
             case "version" -> randomVersion().toBytesRef();
             case "null" -> null;
+            case "_source" -> {
+                try {
+                    yield BytesReference.bytes(
+                        JsonXContent.contentBuilder().startObject().field(randomAlphaOfLength(3), randomAlphaOfLength(10)).endObject()
+                    ).toBytesRef();
+                } catch (IOException e) {
+                    throw new UncheckedIOException(e);
+                }
+            }
             default -> throw new IllegalArgumentException("can't make random values for [" + type.typeName() + "]");
         }, type);
     }
@@ -673,6 +685,30 @@ protected static List<TestCaseSupplier> errorsForCasesWithoutExamples(
+
+    protected static List<TestCaseSupplier> failureForCasesWithoutExamples(List<TestCaseSupplier> testCaseSuppliers) {
+        typesRequired(testCaseSuppliers);
+        List<TestCaseSupplier> suppliers = new ArrayList<>(testCaseSuppliers.size());
+        suppliers.addAll(testCaseSuppliers);
+
+        Set<List<DataType>> valid = testCaseSuppliers.stream().map(TestCaseSupplier::types).collect(Collectors.toSet());
+        List<Set<DataType>> validPerPosition = validPerPosition(valid);
+
+        testCaseSuppliers.stream()
+            .map(s -> s.types().size())
+            .collect(Collectors.toSet())
+            .stream()
+            .flatMap(count -> allPermutations(count))
+            .filter(types -> valid.contains(types) == false)
+            .map(types -> new TestCaseSupplier("type error for " + TestCaseSupplier.nameFromTypes(types), types, () -> {
+                throw new IllegalStateException("must implement a case for " + types);
+            }))
+            .forEach(suppliers::add);
+        return suppliers;
+    }
+
     private static void typesRequired(List suppliers) {
         String bad = suppliers.stream().filter(s -> s.types() == null).map(s -> s.name()).collect(Collectors.joining("\n"));
         if (bad.equals("") == false) {
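`failureForCasesWithoutExamples` above inverts the usual helper: instead of auto-generating type-error cases, it turns every type signature that lacks an explicit example into a case that fails loudly, forcing each one to be written by hand. A self-contained sketch of the same enumerate-and-filter idea, with plain strings standing in for the test-infrastructure types:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

final class FailureCasesSketch {
    // For every candidate signature not already covered, add a case that throws on use.
    static List<Runnable> withFailuresForMissing(Set<List<String>> covered, List<List<String>> allSignatures) {
        List<Runnable> cases = new ArrayList<>();
        for (List<String> signature : allSignatures) {
            if (covered.contains(signature) == false) {
                cases.add(() -> {
                    throw new IllegalStateException("must implement a case for " + signature);
                });
            }
        }
        return cases;
    }
}
```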
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java
index b80b64a0783ba..2c0864d0a8fdc 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java
@@ -10,45 +10,64 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

-import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
-import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
-import org.elasticsearch.xpack.ql.expression.Literal;
 import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
 import org.elasticsearch.xpack.ql.type.DataTypes;
 import org.hamcrest.Matcher;

+import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Supplier;

 import static org.hamcrest.Matchers.equalTo;

-public class IsNotNullTests extends AbstractScalarFunctionTestCase {
+public class IsNotNullTests extends AbstractFunctionTestCase {
     public IsNotNullTests(@Name("TestCase") Supplier testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }

     @ParametersFactory
     public static Iterable parameters() {
-        return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Keyword Not Null", () -> {
-            return new TestCaseSupplier.TestCase(
-                List.of(new TestCaseSupplier.TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")),
-                "IsNotNullEvaluator[field=Attribute[channel=0]]",
-                DataTypes.BOOLEAN,
-                equalTo(true)
+        List suppliers = new ArrayList<>();
+        for (DataType type : EsqlDataTypes.types()) {
+            if (false == EsqlDataTypes.isRepresentable(type)) {
+                continue;
+            }
+            if (type != DataTypes.NULL) {
+                suppliers.add(
+                    new TestCaseSupplier(
+                        "non-null " + type.typeName(),
+                        List.of(type),
+                        () -> new TestCaseSupplier.TestCase(
+                            List.of(new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, "v")),
+                            "IsNotNullEvaluator[field=Attribute[channel=0]]",
+                            DataTypes.BOOLEAN,
+                            equalTo(true)
+                        )
+                    )
+                );
+            }
+            suppliers.add(
+                new TestCaseSupplier(
+                    "null " + type.typeName(),
+                    List.of(type),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(new TestCaseSupplier.TypedData(null, type, "v")),
+                        "IsNotNullEvaluator[field=Attribute[channel=0]]",
+                        DataTypes.BOOLEAN,
+                        equalTo(false)
+                    )
+                )
             );
-        })));
-    }
-
-    @Override
-    protected DataType expectedType(List argTypes) {
-        return DataTypes.BOOLEAN;
+        }
+        return parameterSuppliersFromTypedData(failureForCasesWithoutExamples(suppliers));
     }

     @Override
@@ -56,27 +75,11 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo
         assertFalse(((BooleanBlock) value).asVector().getBoolean(0));
     }

-    @Override
-    protected List argSpec() {
-        return List.of(required(EsqlDataTypes.types().toArray(DataType[]::new)));
-    }
-
     @Override
     protected Expression build(Source source, List args) {
         return new IsNotNull(Source.EMPTY, args.get(0));
     }

-    public void testAllTypes() {
-        for (DataType type : EsqlDataTypes.types()) {
-            if (DataTypes.isPrimitive(type) == false) {
-                continue;
-            }
-            Literal lit = randomLiteral(EsqlDataTypes.widenSmallNumericTypes(type));
-            assertThat(new IsNotNull(Source.EMPTY, lit).fold(), equalTo(lit.value() != null));
-            assertThat(new IsNotNull(Source.EMPTY, new Literal(Source.EMPTY, null, type)).fold(), equalTo(false));
-        }
-    }
-
     @Override
     protected Matcher allNullsMatcher() {
         return equalTo(false);
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java
index 3702d4814ce02..c6c67d67375db 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java
@@ -10,45 +10,64 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

-import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
-import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
-import org.elasticsearch.xpack.ql.expression.Literal;
 import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
 import org.elasticsearch.xpack.ql.type.DataTypes;
 import org.hamcrest.Matcher;

+import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Supplier;

 import static org.hamcrest.Matchers.equalTo;

-public class IsNullTests extends AbstractScalarFunctionTestCase {
+public class IsNullTests extends AbstractFunctionTestCase {
     public IsNullTests(@Name("TestCase") Supplier testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }

     @ParametersFactory
     public static Iterable parameters() {
-        return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Keyword is Null", () -> {
-            return new TestCaseSupplier.TestCase(
-                List.of(new TestCaseSupplier.TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")),
-                "IsNullEvaluator[field=Attribute[channel=0]]",
-                DataTypes.BOOLEAN,
-                equalTo(false)
+        List suppliers = new ArrayList<>();
+        for (DataType type : EsqlDataTypes.types()) {
+            if (false == EsqlDataTypes.isRepresentable(type)) {
+                continue;
+            }
+            if (type != DataTypes.NULL) {
+                suppliers.add(
+                    new TestCaseSupplier(
+                        "non-null " + type.typeName(),
+                        List.of(type),
+                        () -> new TestCaseSupplier.TestCase(
+                            List.of(new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, "v")),
+                            "IsNullEvaluator[field=Attribute[channel=0]]",
+                            DataTypes.BOOLEAN,
+                            equalTo(false)
+                        )
+                    )
+                );
+            }
+            suppliers.add(
+                new TestCaseSupplier(
+                    "null " + type.typeName(),
+                    List.of(type),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(new TestCaseSupplier.TypedData(null, type, "v")),
+                        "IsNullEvaluator[field=Attribute[channel=0]]",
+                        DataTypes.BOOLEAN,
+                        equalTo(true)
+                    )
+                )
             );
-        })));
-    }
-
-    @Override
-    protected DataType expectedType(List argTypes) {
-        return DataTypes.BOOLEAN;
+        }
+        return parameterSuppliersFromTypedData(failureForCasesWithoutExamples(suppliers));
     }

     @Override
@@ -56,27 +75,11 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo
         assertTrue(((BooleanBlock) value).asVector().getBoolean(0));
     }

-    @Override
-    protected List argSpec() {
-        return List.of(required(EsqlDataTypes.types().toArray(DataType[]::new)));
-    }
-
     @Override
     protected Expression build(Source source, List args) {
         return new IsNull(Source.EMPTY, args.get(0));
     }

-    public void testAllTypes() {
-        for (DataType type : EsqlDataTypes.types()) {
-            if (DataTypes.isPrimitive(type) == false) {
-                continue;
-            }
-            Literal lit = randomLiteral(EsqlDataTypes.widenSmallNumericTypes(type));
-            assertThat(new IsNull(Source.EMPTY, lit).fold(), equalTo(lit.value() == null));
-            assertThat(new IsNull(Source.EMPTY, new Literal(Source.EMPTY, null, type)).fold(), equalTo(true));
-        }
-    }
-
     @Override
     protected Matcher allNullsMatcher() {
         return equalTo(true);
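Both `IsNotNullTests` and `IsNullTests` now build their parameters the same way: for every representable type, one non-null case (skipped for the `NULL` type, whose values are always null) and one null case, with `failureForCasesWithoutExamples` catching anything missed. The loop reduces to roughly this shape, with stand-in types:

```java
import java.util.ArrayList;
import java.util.List;

final class NullCaseGenerationSketch {
    record Case(String name, Object value, boolean expected) {}

    static List<Case> isNotNullCases(List<String> representableTypes, Object sampleValue) {
        List<Case> cases = new ArrayList<>();
        for (String type : representableTypes) {
            if (type.equals("null") == false) {
                cases.add(new Case("non-null " + type, sampleValue, true));
            }
            cases.add(new Case("null " + type, null, false)); // IsNotNull(null) folds to false
        }
        return cases;
    }
}
```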
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java
index b4cc70720c866..f2421fbc6ded9 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java
@@ -132,7 +132,7 @@ ProcessContext addProcessContext(Long id, ProcessContext processContext) {
     }

     public void startDeployment(TrainedModelDeploymentTask task, ActionListener finalListener) {
-        logger.info("[{}] Starting model deployment", task.getDeploymentId());
+        logger.info("[{}] Starting model deployment of model [{}]", task.getDeploymentId(), task.getModelId());

         if (processContextByAllocation.size() >= maxProcesses) {
             finalListener.onFailure(
diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
index b054095499a30..ec8bef6e14c24 100644
--- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
+++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java
@@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry {
      * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring
      * mappings that point to the corresponding ECS fields.
      */
-    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 9;
+    public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 10;
     private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version";
     private static final String STACK_TEMPLATE_VERSION = "8";
     private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version";
diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java
index c61fe9f240ea7..3b2cb542ceb4c 100644
--- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java
+++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/MetadataAttribute.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.xpack.ql.expression;

 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.xpack.ql.tree.NodeInfo;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -24,8 +26,10 @@ public class MetadataAttribute extends TypedAttribute {
         tuple(DataTypes.LONG, false), // _version field is not searchable
         "_index",
         tuple(DataTypes.KEYWORD, true),
-        "_id",
-        tuple(DataTypes.KEYWORD, false) // actually searchable, but fielddata access on the _id field is disallowed by default
+        IdFieldMapper.NAME,
+        tuple(DataTypes.KEYWORD, false), // actually searchable, but fielddata access on the _id field is disallowed by default
+        SourceFieldMapper.NAME,
+        tuple(DataTypes.SOURCE, false)
     );

     private final boolean searchable;
diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java
index a080e673fe50a..6aa47f7c817a7 100644
--- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java
+++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DataTypes.java
@@ -6,6 +6,8 @@
  */
 package org.elasticsearch.xpack.ql.type;

+import org.elasticsearch.index.mapper.SourceFieldMapper;
+
 import java.math.BigInteger;
 import java.time.ZonedDateTime;
 import java.util.Collection;
@@ -52,6 +54,14 @@ public final class DataTypes {
     public static final DataType OBJECT = new DataType("object", 0, false, false, false);
     public static final DataType NESTED = new DataType("nested", 0, false, false, false);
     //end::noformat
+    public static final DataType SOURCE = new DataType(
+        SourceFieldMapper.NAME,
+        SourceFieldMapper.NAME,
+        Integer.MAX_VALUE,
+        false,
+        false,
+        false
+    );

     private static final Collection TYPES = Stream.of(
         UNSUPPORTED,
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java
index 67a4d14177fac..459af5f9a1439 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/PreAuthorizationUtils.java
@@ -45,7 +45,6 @@ public final class PreAuthorizationUtils {
             SearchTransportService.QUERY_ACTION_NAME,
             SearchTransportService.QUERY_ID_ACTION_NAME,
             SearchTransportService.FETCH_ID_ACTION_NAME,
-            SearchTransportService.QUERY_CAN_MATCH_NAME,
             SearchTransportService.QUERY_CAN_MATCH_NODE_NAME
         )
     );
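The `MetadataAttribute` change above registers `_source` alongside `_id` with its new data type and `searchable = false`, since the stored source can be loaded but never queried directly. Roughly, the table has this shape, using `Map.entry` as a stand-in for the ES `Tuple`:

```java
import java.util.Map;

final class MetadataAttributeSketch {
    // field name -> (type name, searchable)
    static final Map<String, Map.Entry<String, Boolean>> ATTRIBUTES = Map.of(
        "_version", Map.entry("long", false),   // not searchable
        "_index", Map.entry("keyword", true),
        "_id", Map.entry("keyword", false),     // fielddata on _id is disallowed by default
        "_source", Map.entry("_source", false)  // new: its own data type, never searchable
    );
}
```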
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml
index 7e80ee8dd6904..d0c1e6833ab08 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/30_types.yml
@@ -679,3 +679,62 @@ unsigned_long:
   - match: { columns.0.type: unsigned_long }
   - length: { values: 1 }
   - match: { values.0.0: [ 0, 1, 9223372036854775808, 18446744073709551615 ] }
+
+---
+_source:
+  - skip:
+      version: " - 8.11.99"
+      reason: "_source is available in 8.12+"
+
+  - do:
+      bulk:
+        index: test
+        refresh: true
+        body:
+          - { "index" : { "_index" : "test", "_id" : "id-1" } }
+          - { "wow": 1, "such": "_source", "you'd": "never", "expect": ["amazing", "source"] }
+
+  - do:
+      esql.query:
+        body:
+          query: 'FROM test [METADATA _source] | KEEP _source | LIMIT 1'
+  - match: { columns.0.name: _source }
+  - match: { columns.0.type: _source }
+  - length: { values: 1 }
+  - match:
+      values.0.0:
+        wow: 1
+        such: _source
+        "you'd": never
+        expect: [amazing, source]
+
+---
+_source disabled:
+  - skip:
+      version: " - 8.11.99"
+      reason: "_source is available in 8.12+"
+
+  - do:
+      indices.create:
+        index: test
+        body:
+          mappings:
+            _source:
+              enabled: false
+
+  - do:
+      bulk:
+        index: test
+        refresh: true
+        body:
+          - { "index" : { "_index" : "test", "_id" : "id-1" } }
+          - { "wow": 1, "such": "_source", "you'd": "never", "expect": ["amazing", "source"] }
+
+  - do:
+      esql.query:
+        body:
+          query: 'FROM test [METADATA _source] | KEEP _source | LIMIT 1'
+  - match: { columns.0.name: _source }
+  - match: { columns.0.type: _source }
+  - length: { values: 1 }
+  - match: { values.0.0: null }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml
index 1f9dc67dbfbbd..69bd944430f04 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_tsdb.yml
@@ -226,3 +226,38 @@ from index pattern explicit counter use:
     esql.query:
       body:
         query: 'FROM test* | keep *.tx'
+
+
+---
+_source:
+  - skip:
+      version: " - 8.11.99"
+      reason: "_source is available in 8.12+"
+
+  - do:
+      esql.query:
+        body:
+          query: 'FROM test [METADATA _source] | WHERE @timestamp == "2021-04-28T18:50:23.142Z" | KEEP _source | LIMIT 1'
+  - match: { columns.0.name: _source }
+  - match: { columns.0.type: _source }
+  - length: { values: 1 }
+  - match:
+      values.0.0:
+        "@timestamp": "2021-04-28T18:50:23.142Z"
+        metricset: pod
+        k8s:
+          pod:
+            ip: 10.10.55.3
+            name: dog
+            network:
+              rx: 530600088
+              tx: 1434577921
+            uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9
diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java
index 92c751ca28948..27250dd4e3367 100644
--- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java
@@ -21,6 +21,8 @@
 import java.util.concurrent.TimeUnit;

 import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;

@@ -131,7 +133,6 @@ public void testAutoFollowing() throws Exception {
         createLeaderIndex(leaderClient(), leaderIndex1);
         index(leaderClient(), leaderIndex1, 64);
         assertBusy(() -> {
-            String followerIndex = "copy-" + leaderIndex1;
             assertThat(getNumberOfSuccessfulFollowedIndices(), equalTo(1));
             assertTotalHitCount(followerIndex, 64, followerClient());
@@ -205,7 +206,6 @@ public void testAutoFollowing() throws Exception {
         }
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100291")
     public void testCannotFollowLeaderInUpgradedCluster() throws Exception {
         if (upgradeState != UpgradeState.ALL) {
             return;
@@ -226,8 +226,20 @@ public void testCannotFollowLeaderInUpgradedCluster() throws Exception {
                 ResponseException.class,
                 () -> followIndex(leaderClient(), "follower", "not_supported", "not_supported")
             );
-            assertThat(e.getMessage(), containsString("the snapshot was created with Elasticsearch version ["));
-            assertThat(e.getMessage(), containsString("] which is higher than the version of this node ["));
+
+            assertThat(
+                e.getMessage(),
+                anyOf(
+                    allOf(
+                        containsString("the snapshot was created with index version ["),
+                        containsString("] which is higher than the version used by this node [")
+                    ),
+                    allOf(
+                        containsString("the snapshot was created with Elasticsearch version ["),
+                        containsString("] which is higher than the version of this node [")
+                    )
+                )
+            );
         } else if (clusterName == ClusterName.LEADER) {
             // At this point all nodes in both clusters have been updated and
             // the leader cluster can now follow not_supported index in the follower cluster:
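The un-muted CCR test above now has to pass on both sides of an error-message change, so it accepts either wording with nested Hamcrest matchers. The same tolerance pattern, reduced to a standalone check:

```java
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;

final class VersionTolerantAssertionSketch {
    static void assertSnapshotVersionError(String message) {
        assertThat(
            message,
            anyOf(
                // newer wording (index version)
                allOf(
                    containsString("the snapshot was created with index version ["),
                    containsString("] which is higher than the version used by this node [")
                ),
                // older wording (Elasticsearch version)
                allOf(
                    containsString("the snapshot was created with Elasticsearch version ["),
                    containsString("] which is higher than the version of this node [")
                )
            )
        );
    }
}
```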