diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index e85f9a5608644..376ad4d4e6716 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -27,7 +27,7 @@ archivesBaseName = 'elasticsearch-benchmarks' test.enabled = false dependencies { - compile("org.elasticsearch:elasticsearch:${version}") { + compile(project(":server")) { // JMH ships with the conflicting version 4.6. This prevents us from using jopt-simple in benchmarks (which should be ok) but allows // us to invoke the JMH uberjar as usual. exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' diff --git a/build.gradle b/build.gradle index c91c1554bd405..9a6335eea6e76 100644 --- a/build.gradle +++ b/build.gradle @@ -209,68 +209,7 @@ allprojects { javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') } - /* Sets up the dependencies that we build as part of this project but - register as though they were external to resolve internally. We register - them as external dependencies so the build plugin that we use can be used - to build elasticsearch plugins outside of the elasticsearch source tree. */ - ext.projectSubstitutions = [ - "org.elasticsearch.gradle:build-tools:${version}": ':build-tools', - "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', - "org.elasticsearch:elasticsearch:${version}": ':server', - "org.elasticsearch:elasticsearch-cli:${version}": ':libs:elasticsearch-cli', - "org.elasticsearch:elasticsearch-core:${version}": ':libs:core', - "org.elasticsearch:elasticsearch-nio:${version}": ':libs:nio', - "org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content', - "org.elasticsearch:elasticsearch-geo:${version}": ':libs:elasticsearch-geo', - "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', - "org.elasticsearch:elasticsearch-ssl-config:${version}": ':libs:elasticsearch-ssl-config', - "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', - "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', - "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level', - "org.elasticsearch.client:test:${version}": ':client:test', - "org.elasticsearch.client:transport:${version}": ':client:transport', - "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${version}": ':modules:lang-painless:spi', - "org.elasticsearch.test:framework:${version}": ':test:framework', - "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', - "org.elasticsearch.xpack.test:feature-aware:${version}": ':x-pack:test:feature-aware', - // for transport client - "org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4', - "org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex', - "org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache', - "org.elasticsearch.plugin:parent-join-client:${version}": ':modules:parent-join', - "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}": ':modules:aggs-matrix-stats', - "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', - "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', - // for security example plugins - "org.elasticsearch.plugin:x-pack-core:${version}": ':x-pack:plugin:core' - ] - - /* - * Gradle only resolve project substitutions during dependency resolution but - * we sometimes want to do the resolution at other times. 
This creates a - * convenient method we can call to do it. - */ - ext.dependencyToProject = { Dependency dep -> - if (dep instanceof ProjectDependency) { - return dep.dependencyProject - } else { - String substitution = projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") - if (substitution != null) { - return findProject(substitution) - } - return null - } - } - project.afterEvaluate { - configurations.matching { it.canBeResolved }.all { - resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> - projectSubstitutions.each { k,v -> - subs.substitute(subs.module(k)).with(subs.project(v)) - } - } - } - // Handle javadoc dependencies across projects. Order matters: the linksOffline for // org.elasticsearch:elasticsearch must be the last one or all the links for the // other packages (e.g org.elasticsearch.client) will point to server rather than @@ -279,10 +218,10 @@ allprojects { String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { shadowed, dep -> - if (dep.group == null || false == dep.group.startsWith('org.elasticsearch')) { + if ((dep instanceof ProjectDependency) == false) { return } - Project upstreamProject = project.ext.dependencyToProject(dep) + Project upstreamProject = dep.dependencyProject if (upstreamProject == null) { return } @@ -338,8 +277,8 @@ gradle.projectsEvaluated { integTest.mustRunAfter test } configurations.matching { it.canBeResolved }.all { Configuration configuration -> - dependencies.all { Dependency dep -> - Project upstreamProject = dependencyToProject(dep) + dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> + Project upstreamProject = dep.dependencyProject if (upstreamProject != null) { if (project.path == upstreamProject.path) { // TODO: distribution integ tests depend on themselves (!), fix that diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index a974e866465be..2f62a4024276a 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -148,6 +148,11 @@ if (project != rootProject) { distribution project(':distribution:archives:linux-tar') distribution project(':distribution:archives:oss-linux-tar') } + + // for external projects we want to remove the marker file indicating we are running the Elasticsearch project + processResources { + exclude 'buildSrc.marker' + } String localDownloads = "${rootProject.buildDir}/local-downloads" task setupLocalDownloads(type:Copy) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index fffbcd273dcb7..029993f40db64 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle +import com.github.jengelman.gradle.plugins.shadow.ShadowExtension import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar import groovy.transform.CompileDynamic @@ -77,7 +78,6 @@ import org.gradle.authentication.http.HttpHeaderAuthentication import org.gradle.external.javadoc.CoreJavadocOptions import org.gradle.internal.jvm.Jvm import org.gradle.language.base.plugins.LifecycleBasePlugin -import org.gradle.process.CommandLineArgumentProvider import 
org.gradle.process.ExecResult import org.gradle.process.ExecSpec import org.gradle.util.GradleVersion @@ -259,7 +259,7 @@ class BuildPlugin implements Plugin { if (ext.get('buildDocker')) { (ext.get('requiresDocker') as List).add(task) } else { - task.enabled = false + task.onlyIf { false } } } @@ -530,39 +530,43 @@ class BuildPlugin implements Plugin { static void configurePomGeneration(Project project) { // Only works with `enableFeaturePreview('STABLE_PUBLISHING')` // https://github.com/gradle/gradle/issues/5696#issuecomment-396965185 - project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> - // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, - // just make a copy. - ExtraPropertiesExtension ext = generatePOMTask.extensions.getByType(ExtraPropertiesExtension) - ext.set('pomFileName', null) - generatePOMTask.doLast { - project.copy { CopySpec spec -> - spec.from generatePOMTask.destination - spec.into "${project.buildDir}/distributions" - spec.rename { - ext.has('pomFileName') && ext.get('pomFileName') == null ? - "${project.convention.getPlugin(BasePluginConvention).archivesBaseName}-${project.version}.pom" : - ext.get('pomFileName') + // dummy task to depend on the real pom generation + project.plugins.withType(MavenPublishPlugin).whenPluginAdded { + Task generatePomTask = project.tasks.create("generatePom") + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + assemble.dependsOn(generatePomTask) + } + project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom pomTask -> + // The GenerateMavenPom task is aggressive about setting the destination, instead of fighting it, + // just make a copy. + ExtraPropertiesExtension ext = pomTask.extensions.getByType(ExtraPropertiesExtension) + ext.set('pomFileName', null) + pomTask.doLast { + project.copy { CopySpec spec -> + spec.from pomTask.destination + spec.into "${project.buildDir}/distributions" + spec.rename { + ext.has('pomFileName') && ext.get('pomFileName') == null ? 
+ "${project.convention.getPlugin(BasePluginConvention).archivesBaseName}-${project.version}.pom" : + ext.get('pomFileName') + } } } } - // build poms with assemble (if the assemble task exists) - Task assemble = project.tasks.findByName('assemble') - if (assemble && assemble.enabled) { - assemble.dependsOn(generatePOMTask) - } - } - project.plugins.withType(MavenPublishPlugin).whenPluginAdded { + generatePomTask.dependsOn = ['generatePomFileForNebulaPublication'] PublishingExtension publishing = project.extensions.getByType(PublishingExtension) publishing.publications.all { MavenPublication publication -> // we only deal with maven // add exclusions to the pom directly, for each of the transitive deps of this project's deps publication.pom.withXml(fixupDependencies(project)) } project.plugins.withType(ShadowPlugin).whenPluginAdded { - MavenPublication publication = publishing.publications.maybeCreate('nebula', MavenPublication) + MavenPublication publication = publishing.publications.maybeCreate('shadow', MavenPublication) publication.with { - artifacts = [ project.tasks.getByName('shadowJar') ] + ShadowExtension shadow = project.extensions.getByType(ShadowExtension) + shadow.component(publication) } + generatePomTask.dependsOn = ['generatePomFileForShadowPublication'] } } } @@ -690,6 +694,12 @@ class BuildPlugin implements Plugin { project.tasks.withType(Jar) { Jar jarTask -> // we put all our distributable files under distributions jarTask.destinationDir = new File(project.buildDir, 'distributions') + project.plugins.withType(ShadowPlugin).whenPluginAdded { + // ensure the original jar task places its output in 'libs' so we don't overwrite it with the shadowjar + if (jarTask instanceof ShadowJar == false) { + jarTask.destinationDir = new File(project.buildDir, 'libs') + } + } // fixup the jar manifest jarTask.doFirst { // this doFirst is added before the info plugin, therefore it will run @@ -740,12 +750,6 @@ class BuildPlugin implements Plugin { } } project.plugins.withType(ShadowPlugin).whenPluginAdded { - /* - * When we use the shadow plugin we entirely replace the - * normal jar with the shadow jar so we no longer want to run - * the jar task. 
- */ - project.tasks.getByName(JavaPlugin.JAR_TASK_NAME).enabled = false project.tasks.getByName('shadowJar').configure { ShadowJar shadowJar -> /* * Replace the default "shadow" classifier with null @@ -764,7 +768,6 @@ class BuildPlugin implements Plugin { } // Make sure we assemble the shadow jar project.tasks.getByName(BasePlugin.ASSEMBLE_TASK_NAME).dependsOn project.tasks.getByName('shadowJar') - project.artifacts.add('apiElements', project.tasks.getByName('shadowJar')) } } @@ -878,6 +881,7 @@ class BuildPlugin implements Plugin { project.plugins.withType(ShadowPlugin).whenPluginAdded { // Test against a shadow jar if we made one + test.classpath -= project.configurations.getByName('bundle') test.classpath -= project.tasks.getByName('compileJava').outputs.files test.classpath += project.tasks.getByName('shadowJar').outputs.files diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index c391757099097..a25c0415f6483 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -26,6 +26,7 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.test.RunTask import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.tool.ClasspathUtils import org.gradle.api.InvalidUserDataException import org.gradle.api.Plugin import org.gradle.api.Project @@ -136,8 +137,13 @@ class PluginBuildPlugin implements Plugin { private static void configureDependencies(Project project) { project.dependencies { - compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" - testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}" + if (ClasspathUtils.isElasticsearchProject()) { + compileOnly project.project(':server') + testCompile project.project(':test:framework') + } else { + compileOnly "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" + testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}" + } // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps compileOnly "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index f656f177ce67b..a5d4f3fcd9495 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -23,11 +23,13 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.tool.ClasspathUtils import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.quality.Checkstyle + /** * Validation tasks which should be run before committing. These run before tests. 
*/ @@ -40,18 +42,18 @@ class PrecommitTasks { public static Task create(Project project, boolean includeDependencyLicenses) { project.configurations.create("forbiddenApisCliJar") project.dependencies { - forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.6') + forbiddenApisCliJar('de.thetaphi:forbiddenapis:2.6') } List precommitTasks = [ - configureCheckstyle(project), - configureForbiddenApisCli(project), - project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), - project.tasks.create('licenseHeaders', LicenseHeadersTask.class), - project.tasks.create('filepermissions', FilePermissionsTask.class), - configureJarHell(project), - configureThirdPartyAudit(project), - configureTestingConventions(project) + configureCheckstyle(project), + configureForbiddenApisCli(project), + project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), + project.tasks.create('licenseHeaders', LicenseHeadersTask.class), + project.tasks.create('filepermissions', FilePermissionsTask.class), + configureJarHell(project), + configureThirdPartyAudit(project), + configureTestingConventions(project) ] // tasks with just tests don't need dependency licenses, so this flag makes adding @@ -85,10 +87,10 @@ class PrecommitTasks { } return project.tasks.create([ - name: 'precommit', - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Runs all non-test checks.', - dependsOn: precommitTasks + name : 'precommit', + group : JavaBasePlugin.VERIFICATION_GROUP, + description: 'Runs all non-test checks.', + dependsOn : precommitTasks ]) } @@ -168,7 +170,7 @@ class PrecommitTasks { ) } } - Task forbiddenApis = project.tasks.getByName("forbiddenApis") + Task forbiddenApis = project.tasks.getByName("forbiddenApis") forbiddenApis.group = "" return forbiddenApis } @@ -211,7 +213,7 @@ class PrecommitTasks { project.checkstyle { config = project.resources.text.fromFile(checkstyleConf, 'UTF-8') configProperties = [ - suppressions: checkstyleSuppressions + suppressions: checkstyleSuppressions ] toolVersion = CHECKSTYLE_VERSION } @@ -229,9 +231,11 @@ class PrecommitTasks { } private static Task configureLoggerUsage(Project project) { + Object dependency = ClasspathUtils.isElasticsearchProject() ? 
project.project(':test:logger-usage') : + "org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}" + project.configurations.create('loggerUsagePlugin') - project.dependencies.add('loggerUsagePlugin', - "org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}") + project.dependencies.add('loggerUsagePlugin', dependency) return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) { classpath = project.configurations.loggerUsagePlugin } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 43e17eb73b96c..372c8bc70004c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -252,7 +252,7 @@ class RestIntegTestTask extends DefaultTask { restSpec } project.dependencies { - restSpec "org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch}" + restSpec project.project(':rest-api-spec') } Task copyRestSpec = project.tasks.findByName('copyRestSpec') if (copyRestSpec != null) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index c9a26eb74b54d..f3ebfecc32267 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -79,7 +79,7 @@ class StandaloneRestTestPlugin implements Plugin { // create a compileOnly configuration as others might expect it project.configurations.create("compileOnly") - project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}") + project.dependencies.add('testCompile', project.project(':test:framework')) EclipseModel eclipse = project.extensions.getByType(EclipseModel) eclipse.classpath.sourceSets = [testSourceSet] diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index a408b66ec817d..d4f0d9941dacf 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -23,6 +23,7 @@ import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.UnknownTaskException; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.ConfigurationContainer; @@ -165,7 +166,12 @@ private static void setupRootJdkDownload(Project rootProject, String platform, S } String extractDir = rootProject.getBuildDir().toPath().resolve("jdks/openjdk-" + jdkVersion + "_" + platform).toString(); TaskProvider extractTask = rootProject.getTasks().register(extractTaskName, Copy.class, copyTask -> { - copyTask.doFirst(t -> rootProject.delete(extractDir)); + copyTask.doFirst(new Action() { + @Override + public void execute(Task t) { + rootProject.delete(extractDir); + } + }); copyTask.into(extractDir); copyTask.from(fileGetter, removeRootDir); }); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java new file mode 100644 index 0000000000000..40ec6bd71830a --- /dev/null 
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/ClasspathUtils.java
@@ -0,0 +1,23 @@
+package org.elasticsearch.gradle.tool;
+
+public class ClasspathUtils {
+    private static boolean isElasticsearchProject;
+
+    static {
+        // look for buildSrc marker file, if it exists then we are running in the context of the elastic/elasticsearch build
+        isElasticsearchProject = ClasspathUtils.class.getResource("/buildSrc.marker") != null;
+    }
+
+    private ClasspathUtils() {
+    }
+
+    /**
+     * Determine if we are running in the context of the `elastic/elasticsearch` project. This method will return {@code false} when
+     * the build-tools project is pulled in as an external dependency.
+     *
+     * @return if we are currently running in the `elastic/elasticsearch` project
+     */
+    public static boolean isElasticsearchProject() {
+        return isElasticsearchProject;
+    }
+}
diff --git a/buildSrc/src/main/resources/buildSrc.marker b/buildSrc/src/main/resources/buildSrc.marker
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle
index a53f102034009..eb2c634f97203 100644
--- a/client/benchmark/build.gradle
+++ b/client/benchmark/build.gradle
@@ -34,12 +34,12 @@ test.enabled = false
 dependencies {
   compile 'org.apache.commons:commons-math3:3.2'
 
-  compile("org.elasticsearch.client:elasticsearch-rest-client:${version}")
+  compile project(":client:rest")
   // bottleneck should be the client, not Elasticsearch
   compile project(path: ':client:client-benchmark-noop-api-plugin')
   // for transport client
-  compile("org.elasticsearch:elasticsearch:${version}")
-  compile("org.elasticsearch.client:transport:${version}")
+  compile project(":server")
+  compile project(":client:transport")
   compile project(path: ':modules:transport-netty4', configuration: 'runtime')
   compile project(path: ':modules:reindex', configuration: 'runtime')
   compile project(path: ':modules:lang-mustache', configuration: 'runtime')
diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle
index c9a3fc486f1da..0d3b2e24fcb22 100644
--- a/client/rest-high-level/build.gradle
+++ b/client/rest-high-level/build.gradle
@@ -46,32 +46,34 @@ idea {
 }
 
 dependencies {
-  /*
-   * Everything in the "shadow" configuration is *not* copied into the
-   * shadowJar.
- */ - compile "org.elasticsearch:elasticsearch:${version}" - compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" - compile "org.elasticsearch.plugin:parent-join-client:${version}" - compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" - compile "org.elasticsearch.plugin:rank-eval-client:${version}" - compile "org.elasticsearch.plugin:lang-mustache-client:${version}" + compile project(':server') + compile project(':client:rest') + compile project(':modules:parent-join') + compile project(':modules:aggs-matrix-stats') + compile project(':modules:rank-eval') + compile project(':modules:lang-mustache') + shadow project(':server') + shadow project(':client:rest') + bundle project(':modules:parent-join') + bundle project(':modules:aggs-matrix-stats') + bundle project(':modules:rank-eval') + bundle project(':modules:lang-mustache') - testCompile "org.elasticsearch.client:test:${version}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(':client:test') + testCompile project(':test:framework') testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" //this is needed to make RestHighLevelClientTests#testApiNamingConventions work from IDEs - testCompile "org.elasticsearch:rest-api-spec:${version}" + testCompile project(":rest-api-spec") // Needed for serialization tests: // (In order to serialize a server side class to a client side class or the other way around) if (isEclipse == false || project.path == ":client:rest-high-level-tests") { - testCompile("org.elasticsearch.plugin:x-pack-core:${version}") { + testCompile(project(':x-pack:plugin:core')) { exclude group: 'org.elasticsearch', module: 'elasticsearch-rest-high-level-client' } } - restSpec "org.elasticsearch:rest-api-spec:${version}" + restSpec project(':rest-api-spec') } //we need to copy the yaml spec so we can check naming (see RestHighlevelClientTests#testApiNamingConventions) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index cbb1d95feae1b..a5a57e4d6b8fa 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -22,8 +22,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -47,6 +45,8 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; +import org.elasticsearch.client.indices.AnalyzeRequest; +import org.elasticsearch.client.indices.AnalyzeResponse; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; diff 
--git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 5cbab84313886..7a67fe71348f4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -26,7 +26,6 @@ import org.apache.http.client.methods.HttpPut; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -41,6 +40,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; +import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetFieldMappingsRequest; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 9aec180a1a748..25fb1818bb56f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.explain.ExplainRequest; @@ -52,6 +51,7 @@ import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.core.MultiTermVectorsRequest; import org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/AnalyzeRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/AnalyzeRequest.java new file mode 100644 index 0000000000000..1aed59227e8bd --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/AnalyzeRequest.java @@ -0,0 +1,343 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A request to analyze text + */ +public class AnalyzeRequest implements Validatable, ToXContentObject { + + private String index; + + private String[] text; + + private String analyzer; + + private NameOrDefinition tokenizer; + + private final List tokenFilters = new ArrayList<>(); + + private final List charFilters = new ArrayList<>(); + + private String field; + + private boolean explain = false; + + private String[] attributes = Strings.EMPTY_ARRAY; + + private String normalizer; + + /** + * Analyzes text using a global analyzer + */ + public static AnalyzeRequest withGlobalAnalyzer(String analyzer, String... text) { + return new AnalyzeRequest(null, analyzer, null, null, text); + } + + /** + * Analyzes text using a custom analyzer built from global components + */ + public static CustomAnalyzerBuilder buildCustomAnalyzer(String tokenizer) { + return new CustomAnalyzerBuilder(null, new NameOrDefinition(tokenizer)); + } + + /** + * Analyzes text using a custom analyzer built from global components + */ + public static CustomAnalyzerBuilder buildCustomAnalyzer(Map tokenizerSettings) { + return new CustomAnalyzerBuilder(null, new NameOrDefinition(tokenizerSettings)); + } + + /** + * Analyzes text using a custom analyzer built from components defined on an index + */ + public static CustomAnalyzerBuilder buildCustomAnalyzer(String index, String tokenizer) { + return new CustomAnalyzerBuilder(index, new NameOrDefinition(tokenizer)); + } + + /** + * Analyzes text using a custom analyzer built from components defined on an index + */ + public static CustomAnalyzerBuilder buildCustomAnalyzer(String index, Map tokenizerSettings) { + return new CustomAnalyzerBuilder(index, new NameOrDefinition(tokenizerSettings)); + } + + /** + * Analyzes text using a named analyzer on an index + */ + public static AnalyzeRequest withIndexAnalyzer(String index, String analyzer, String... text) { + return new AnalyzeRequest(index, analyzer, null, null, text); + } + + /** + * Analyzes text using the analyzer defined on a specific field within an index + */ + public static AnalyzeRequest withField(String index, String field, String... text) { + return new AnalyzeRequest(index, null, null, field, text); + } + + /** + * Analyzes text using a named normalizer on an index + */ + public static AnalyzeRequest withNormalizer(String index, String normalizer, String... 
text) { + return new AnalyzeRequest(index, null, normalizer, null, text); + } + + /** + * Analyzes text using a custom normalizer built from global components + */ + public static CustomAnalyzerBuilder buildCustomNormalizer() { + return new CustomAnalyzerBuilder(null, null); + } + + /** + * Analyzes text using a custom normalizer built from components defined on an index + */ + public static CustomAnalyzerBuilder buildCustomNormalizer(String index) { + return new CustomAnalyzerBuilder(index, null); + } + + /** + * Helper class to build custom analyzer definitions + */ + public static class CustomAnalyzerBuilder { + + final NameOrDefinition tokenizer; + final String index; + List charFilters = new ArrayList<>(); + List tokenFilters = new ArrayList<>(); + + CustomAnalyzerBuilder(String index, NameOrDefinition tokenizer) { + this.tokenizer = tokenizer; + this.index = index; + } + + public CustomAnalyzerBuilder addCharFilter(String name) { + charFilters.add(new NameOrDefinition(name)); + return this; + } + + public CustomAnalyzerBuilder addCharFilter(Map settings) { + charFilters.add(new NameOrDefinition(settings)); + return this; + } + + public CustomAnalyzerBuilder addTokenFilter(String name) { + tokenFilters.add(new NameOrDefinition(name)); + return this; + } + + public CustomAnalyzerBuilder addTokenFilter(Map settings) { + tokenFilters.add(new NameOrDefinition(settings)); + return this; + } + + public AnalyzeRequest build(String... text) { + return new AnalyzeRequest(index, tokenizer, charFilters, tokenFilters, text); + } + } + + private AnalyzeRequest(String index, String analyzer, String normalizer, String field, String... text) { + this.index = index; + this.analyzer = analyzer; + this.normalizer = normalizer; + this.field = field; + this.text = text; + } + + private AnalyzeRequest(String index, NameOrDefinition tokenizer, List charFilters, + List tokenFilters, String... 
text) { + this.index = index; + this.analyzer = null; + this.normalizer = null; + this.field = null; + this.tokenizer = tokenizer; + this.charFilters.addAll(charFilters); + this.tokenFilters.addAll(tokenFilters); + this.text = text; + } + + static class NameOrDefinition implements ToXContentFragment { + // exactly one of these two members is not null + public final String name; + public final Settings definition; + + NameOrDefinition(String name) { + this.name = Objects.requireNonNull(name); + this.definition = null; + } + + NameOrDefinition(Settings settings) { + this.name = null; + this.definition = Objects.requireNonNull(settings); + } + + NameOrDefinition(Map definition) { + this.name = null; + Objects.requireNonNull(definition); + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(definition); + this.definition = Settings.builder().loadFromSource(Strings.toString(builder), builder.contentType()).build(); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse [" + definition + "]", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (definition == null) { + return builder.value(name); + } + builder.startObject(); + definition.toXContent(builder, params); + builder.endObject(); + return builder; + } + + } + + /** + * Returns the index that the request should be executed against, or {@code null} if + * no index is specified + */ + public String index() { + return this.index; + } + + /** + * Returns the text to be analyzed + */ + public String[] text() { + return this.text; + } + + /** + * Returns the named analyzer used for analysis, if defined + */ + public String analyzer() { + return this.analyzer; + } + + /** + * Returns the named tokenizer used for analysis, if defined + */ + public String normalizer() { + return this.normalizer; + } + + /** + * Returns a custom Tokenizer used for analysis, if defined + */ + public NameOrDefinition tokenizer() { + return this.tokenizer; + } + + /** + * Returns the custom token filters used for analysis, if defined + */ + public List tokenFilters() { + return this.tokenFilters; + } + + /** + * Returns the custom character filters used for analysis, if defined + */ + public List charFilters() { + return this.charFilters; + } + + /** + * Returns the field to take an Analyzer from, if defined + */ + public String field() { + return this.field; + } + + /** + * Set whether or not detailed explanations of analysis should be returned + */ + public AnalyzeRequest explain(boolean explain) { + this.explain = explain; + return this; + } + + public boolean explain() { + return this.explain; + } + + public AnalyzeRequest attributes(String... 
attributes) { + if (attributes == null) { + throw new IllegalArgumentException("attributes must not be null"); + } + this.attributes = attributes; + return this; + } + + public String[] attributes() { + return this.attributes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("text", text); + if (Strings.isNullOrEmpty(analyzer) == false) { + builder.field("analyzer", analyzer); + } + if (tokenizer != null) { + builder.field("tokenizer", tokenizer); + } + if (tokenFilters.size() > 0) { + builder.field("filter", tokenFilters); + } + if (charFilters.size() > 0) { + builder.field("char_filter", charFilters); + } + if (Strings.isNullOrEmpty(field) == false) { + builder.field("field", field); + } + if (explain) { + builder.field("explain", true); + } + if (attributes.length > 0) { + builder.field("attributes", attributes); + } + if (Strings.isNullOrEmpty(normalizer) == false) { + builder.field("normalizer", normalizer); + } + return builder.endObject(); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/AnalyzeResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/AnalyzeResponse.java new file mode 100644 index 0000000000000..aaba8653dee84 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/AnalyzeResponse.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client.indices;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+public class AnalyzeResponse {
+
+    private static final String TOKENS = "tokens";
+    private static final String DETAIL = "detail";
+
+    public static class AnalyzeToken {
+        private String term;
+        private int startOffset;
+        private int endOffset;
+        private int position;
+        private int positionLength = 1;
+        private String type;
+        private final Map<String, Object> attributes = new HashMap<>();
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            AnalyzeResponse.AnalyzeToken that = (AnalyzeResponse.AnalyzeToken) o;
+            return startOffset == that.startOffset &&
+                endOffset == that.endOffset &&
+                position == that.position &&
+                positionLength == that.positionLength &&
+                Objects.equals(term, that.term) &&
+                Objects.equals(attributes, that.attributes) &&
+                Objects.equals(type, that.type);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(term, startOffset, endOffset, position, positionLength, attributes, type);
+        }
+
+        public String getTerm() {
+            return this.term;
+        }
+
+        private void setTerm(String term) {
+            this.term = term;
+        }
+
+        public int getStartOffset() {
+            return this.startOffset;
+        }
+
+        private void setStartOffset(int startOffset) {
+            this.startOffset = startOffset;
+        }
+
+        public int getEndOffset() {
+            return this.endOffset;
+        }
+
+        private void setEndOffset(int endOffset) {
+            this.endOffset = endOffset;
+        }
+
+        public int getPosition() {
+            return this.position;
+        }
+
+        private void setPosition(int position) {
+            this.position = position;
+        }
+
+        public int getPositionLength() {
+            return this.positionLength;
+        }
+
+        private void setPositionLength(int positionLength) {
+            this.positionLength = positionLength;
+        }
+
+        public String getType() {
+            return this.type;
+        }
+
+        private void setType(String type) {
+            this.type = type;
+        }
+
+        public Map<String, Object> getAttributes() {
+            return this.attributes;
+        }
+
+        private void setAttribute(String key, Object value) {
+            this.attributes.put(key, value);
+        }
+
+        private static final ObjectParser<AnalyzeToken, Void> PARSER
+            = new ObjectParser<>("analyze_token", AnalyzeToken::setAttribute, AnalyzeToken::new);
+        static {
+            PARSER.declareString(AnalyzeToken::setTerm, new ParseField("token"));
+            PARSER.declareString(AnalyzeToken::setType, new ParseField("type"));
+            PARSER.declareInt(AnalyzeToken::setPosition, new ParseField("position"));
+            PARSER.declareInt(AnalyzeToken::setStartOffset, new ParseField("start_offset"));
+            PARSER.declareInt(AnalyzeToken::setEndOffset, new ParseField("end_offset"));
+            PARSER.declareInt(AnalyzeToken::setPositionLength, new ParseField("positionLength"));
+        }
+
+        public static AnalyzeToken fromXContent(XContentParser parser) throws IOException {
+            return PARSER.parse(parser, null);
+        }
+    }
+
+    private final DetailAnalyzeResponse detail;
+    private final List<AnalyzeToken> tokens;
+
+    private AnalyzeResponse(List<AnalyzeToken> tokens, DetailAnalyzeResponse detail) {
+        this.tokens = tokens;
+        this.detail = detail;
+    }
+
+    public List<AnalyzeToken> getTokens() {
+        return this.tokens;
+    }
+
+    public DetailAnalyzeResponse detail() {
+        return this.detail;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<AnalyzeResponse, Void> PARSER = new ConstructingObjectParser<>("analyze_response",
+        true, args -> new AnalyzeResponse((List<AnalyzeToken>) args[0], (DetailAnalyzeResponse) args[1]));
+
+    static {
+        PARSER.declareObjectArray(optionalConstructorArg(), AnalyzeToken.PARSER, new ParseField(TOKENS));
+        PARSER.declareObject(optionalConstructorArg(), DetailAnalyzeResponse.PARSER, new ParseField(DETAIL));
+    }
+
+    public static AnalyzeResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        AnalyzeResponse that = (AnalyzeResponse) o;
+        return Objects.equals(detail, that.detail) &&
+            Objects.equals(tokens, that.tokens);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(detail, tokens);
+    }
+
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DetailAnalyzeResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DetailAnalyzeResponse.java
new file mode 100644
index 0000000000000..36cf8afad0d58
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DetailAnalyzeResponse.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.indices;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+public class DetailAnalyzeResponse {
+
+    private final boolean customAnalyzer;
+    private final AnalyzeTokenList analyzer;
+    private final CharFilteredText[] charfilters;
+    private final AnalyzeTokenList tokenizer;
+    private final AnalyzeTokenList[] tokenfilters;
+
+    private DetailAnalyzeResponse(boolean customAnalyzer,
+                                  AnalyzeTokenList analyzer,
+                                  List<CharFilteredText> charfilters,
+                                  AnalyzeTokenList tokenizer,
+                                  List<AnalyzeTokenList> tokenfilters) {
+        this.customAnalyzer = customAnalyzer;
+        this.analyzer = analyzer;
+        this.charfilters = charfilters == null ? null : charfilters.toArray(new CharFilteredText[]{});
+        this.tokenizer = tokenizer;
+        this.tokenfilters = tokenfilters == null ? null : tokenfilters.toArray(new AnalyzeTokenList[]{});
+    }
+
+    public AnalyzeTokenList analyzer() {
+        return this.analyzer;
+    }
+
+    public CharFilteredText[] charfilters() {
+        return this.charfilters;
+    }
+
+    public AnalyzeTokenList tokenizer() {
+        return tokenizer;
+    }
+
+    public AnalyzeTokenList[] tokenfilters() {
+        return tokenfilters;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        DetailAnalyzeResponse that = (DetailAnalyzeResponse) o;
+        return customAnalyzer == that.customAnalyzer &&
+            Objects.equals(analyzer, that.analyzer) &&
+            Arrays.equals(charfilters, that.charfilters) &&
+            Objects.equals(tokenizer, that.tokenizer) &&
+            Arrays.equals(tokenfilters, that.tokenfilters);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = Objects.hash(customAnalyzer, analyzer, tokenizer);
+        result = 31 * result + Arrays.hashCode(charfilters);
+        result = 31 * result + Arrays.hashCode(tokenfilters);
+        return result;
+    }
+
+    @SuppressWarnings("unchecked")
+    static final ConstructingObjectParser<DetailAnalyzeResponse, Void> PARSER = new ConstructingObjectParser<>("detail",
+        true, args -> new DetailAnalyzeResponse(
+        (boolean) args[0],
+        (AnalyzeTokenList) args[1],
+        (List<CharFilteredText>)args[2],
+        (AnalyzeTokenList) args[3],
+        (List<AnalyzeTokenList>)args[4]));
+
+    static {
+        PARSER.declareBoolean(constructorArg(), new ParseField("custom_analyzer"));
+        PARSER.declareObject(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField("analyzer"));
+        PARSER.declareObjectArray(optionalConstructorArg(), CharFilteredText.PARSER, new ParseField("charfilters"));
+        PARSER.declareObject(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField("tokenizer"));
+        PARSER.declareObjectArray(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField("tokenfilters"));
+    }
+
+    public static DetailAnalyzeResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    public static class AnalyzeTokenList {
+        private final String name;
+        private final AnalyzeResponse.AnalyzeToken[] tokens;
+
+        private static final String TOKENS = "tokens";
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            AnalyzeTokenList that = (AnalyzeTokenList) o;
+            return Objects.equals(name, that.name) &&
+                Arrays.equals(tokens, that.tokens);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = Objects.hash(name);
+            result = 31 * result + Arrays.hashCode(tokens);
+            return result;
+        }
+
+        public AnalyzeTokenList(String name, List<AnalyzeResponse.AnalyzeToken> tokens) {
+            this.name = name;
+            this.tokens = tokens.toArray(new AnalyzeResponse.AnalyzeToken[]{});
+        }
+
+        public String getName() {
+            return name;
+        }
+
+        public AnalyzeResponse.AnalyzeToken[] getTokens() {
+            return tokens;
+        }
+
+        @SuppressWarnings("unchecked")
+        private static final ConstructingObjectParser<AnalyzeTokenList, Void> PARSER = new ConstructingObjectParser<>("token_list",
+            true, args -> new AnalyzeTokenList((String) args[0],
+            (List<AnalyzeResponse.AnalyzeToken>)args[1]));
+
+        static {
+            PARSER.declareString(constructorArg(), new ParseField("name"));
+            PARSER.declareObjectArray(constructorArg(), (p, c) -> AnalyzeResponse.AnalyzeToken.fromXContent(p),
+                new ParseField("tokens"));
+        }
+
+        public static AnalyzeTokenList fromXContent(XContentParser parser) throws IOException {
+            return PARSER.parse(parser, null);
+        }
+
+    }
+
+    public static class CharFilteredText {
+        private final String name;
+        private final String[] texts;
+
+        CharFilteredText(String name,
String[] texts) { + this.name = name; + if (texts != null) { + this.texts = texts; + } else { + this.texts = Strings.EMPTY_ARRAY; + } + } + + public String getName() { + return name; + } + + public String[] getTexts() { + return texts; + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("char_filtered_text", + true, args -> new CharFilteredText((String) args[0], ((List) args[1]).toArray(new String[0]))); + + static { + PARSER.declareString(constructorArg(), new ParseField("name")); + PARSER.declareStringArray(constructorArg(), new ParseField("filtered_text")); + } + + public static CharFilteredText fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CharFilteredText that = (CharFilteredText) o; + return Objects.equals(name, that.name) && + Arrays.equals(texts, that.texts); + } + + @Override + public int hashCode() { + int result = Objects.hash(name); + result = 31 * result + Arrays.hashCode(texts); + return result; + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java index adfee92bd6171..fed417e9582d8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FindFileStructureRequest.java @@ -37,6 +37,7 @@ public class FindFileStructureRequest implements Validatable, ToXContentFragment { public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); + public static final ParseField LINE_MERGE_SIZE_LIMIT = new ParseField("line_merge_size_limit"); public static final ParseField TIMEOUT = new ParseField("timeout"); public static final ParseField CHARSET = FileStructure.CHARSET; public static final ParseField FORMAT = FileStructure.FORMAT; @@ -52,6 +53,7 @@ public class FindFileStructureRequest implements Validatable, ToXContentFragment public static final ParseField EXPLAIN = new ParseField("explain"); private Integer linesToSample; + private Integer lineMergeSizeLimit; private TimeValue timeout; private String charset; private FileStructure.Format format; @@ -77,6 +79,14 @@ public void setLinesToSample(Integer linesToSample) { this.linesToSample = linesToSample; } + public Integer getLineMergeSizeLimit() { + return lineMergeSizeLimit; + } + + public void setLineMergeSizeLimit(Integer lineMergeSizeLimit) { + this.lineMergeSizeLimit = lineMergeSizeLimit; + } + public TimeValue getTimeout() { return timeout; } @@ -228,6 +238,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (linesToSample != null) { builder.field(LINES_TO_SAMPLE.getPreferredName(), linesToSample); } + if (lineMergeSizeLimit != null) { + builder.field(LINE_MERGE_SIZE_LIMIT.getPreferredName(), lineMergeSizeLimit); + } if (timeout != null) { builder.field(TIMEOUT.getPreferredName(), timeout); } @@ -270,8 +283,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public int hashCode() { - return Objects.hash(linesToSample, timeout, charset, format, columnNames, hasHeaderRow, delimiter, grokPattern, timestampFormat, - timestampField, explain, sample); + return Objects.hash(linesToSample, 
lineMergeSizeLimit, timeout, charset, format, columnNames, hasHeaderRow, delimiter, grokPattern, + timestampFormat, timestampField, explain, sample); } @Override @@ -287,6 +300,7 @@ public boolean equals(Object other) { FindFileStructureRequest that = (FindFileStructureRequest) other; return Objects.equals(this.linesToSample, that.linesToSample) && + Objects.equals(this.lineMergeSizeLimit, that.lineMergeSizeLimit) && Objects.equals(this.timeout, that.timeout) && Objects.equals(this.charset, that.charset) && Objects.equals(this.format, that.format) && diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index d9adf61782b3d..458e6371010b0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -28,8 +28,6 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -58,6 +56,8 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; +import org.elasticsearch.client.indices.AnalyzeRequest; +import org.elasticsearch.client.indices.AnalyzeResponse; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; @@ -1852,12 +1852,12 @@ public void testAnalyze() throws Exception { RestHighLevelClient client = highLevelClient(); - AnalyzeRequest noindexRequest = new AnalyzeRequest().text("One two three").analyzer("english"); + AnalyzeRequest noindexRequest = AnalyzeRequest.withGlobalAnalyzer("english", "One two three"); AnalyzeResponse noindexResponse = execute(noindexRequest, client.indices()::analyze, client.indices()::analyzeAsync); assertThat(noindexResponse.getTokens(), hasSize(3)); - AnalyzeRequest detailsRequest = new AnalyzeRequest().text("One two three").analyzer("english").explain(true); + AnalyzeRequest detailsRequest = AnalyzeRequest.withGlobalAnalyzer("english", "One two three").explain(true); AnalyzeResponse detailsResponse = execute(detailsRequest, client.indices()::analyze, client.indices()::analyzeAsync); assertNotNull(detailsResponse.detail()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index f7d5ac51a73ac..8f52dd7b00b6a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import 
org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -45,6 +44,7 @@ import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.client.indices.GetIndexRequest; @@ -86,18 +86,14 @@ public class IndicesRequestConvertersTests extends ESTestCase { public void testAnalyzeRequest() throws Exception { - AnalyzeRequest indexAnalyzeRequest = new AnalyzeRequest() - .text("Here is some text") - .index("test_index") - .analyzer("test_analyzer"); + AnalyzeRequest indexAnalyzeRequest + = AnalyzeRequest.withIndexAnalyzer("test_index", "test_analyzer", "Here is some text"); Request request = IndicesRequestConverters.analyze(indexAnalyzeRequest); assertThat(request.getEndpoint(), equalTo("/test_index/_analyze")); RequestConvertersTests.assertToXContentBody(indexAnalyzeRequest, request.getEntity()); - AnalyzeRequest analyzeRequest = new AnalyzeRequest() - .text("more text") - .analyzer("test_analyzer"); + AnalyzeRequest analyzeRequest = AnalyzeRequest.withGlobalAnalyzer("test_analyzer", "more text"); assertThat(IndicesRequestConverters.analyze(analyzeRequest).getEndpoint(), equalTo("/_analyze")); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 46d929e27d988..74ddf83d2146a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -56,6 +55,7 @@ import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.core.MultiTermVectorsRequest; import org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -1612,18 +1612,14 @@ public void testPutScript() throws Exception { } public void testAnalyzeRequest() throws Exception { - AnalyzeRequest indexAnalyzeRequest = new AnalyzeRequest() - .text("Here is some text") - .index("test_index") - .analyzer("test_analyzer"); + AnalyzeRequest indexAnalyzeRequest + = AnalyzeRequest.withIndexAnalyzer("test_index", 
"test_analyzer", "Here is some text"); Request request = RequestConverters.analyze(indexAnalyzeRequest); assertThat(request.getEndpoint(), equalTo("/test_index/_analyze")); assertToXContentBody(indexAnalyzeRequest, request.getEntity()); - AnalyzeRequest analyzeRequest = new AnalyzeRequest() - .text("more text") - .analyzer("test_analyzer"); + AnalyzeRequest analyzeRequest = AnalyzeRequest.withGlobalAnalyzer("test_analyzer", "more text"); assertThat(RequestConverters.analyze(analyzeRequest).getEndpoint(), equalTo("/_analyze")); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 14def60b277e8..8e0a3d2fd005b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -26,9 +26,6 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; -import org.elasticsearch.action.admin.indices.analyze.DetailAnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -62,8 +59,11 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.SyncedFlushResponse; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; +import org.elasticsearch.client.indices.AnalyzeRequest; +import org.elasticsearch.client.indices.AnalyzeResponse; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; +import org.elasticsearch.client.indices.DetailAnalyzeResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.client.indices.GetFieldMappingsResponse; @@ -2418,32 +2418,29 @@ public void testAnalyze() throws IOException, InterruptedException { { // tag::analyze-builtin-request - AnalyzeRequest request = new AnalyzeRequest(); - request.text("Some text to analyze", "Some more text to analyze"); // <1> - request.analyzer("english"); // <2> + AnalyzeRequest request = AnalyzeRequest.withGlobalAnalyzer("english", // <1> + "Some text to analyze", "Some more text to analyze"); // <2> // end::analyze-builtin-request } { // tag::analyze-custom-request - AnalyzeRequest request = new AnalyzeRequest(); - request.text("Some text to analyze"); - request.addCharFilter("html_strip"); // <1> - request.tokenizer("standard"); // <2> - request.addTokenFilter("lowercase"); // <3> - Map stopFilter = new HashMap<>(); stopFilter.put("type", "stop"); - stopFilter.put("stopwords", new String[]{ "to" }); // <4> - request.addTokenFilter(stopFilter); // <5> + stopFilter.put("stopwords", new String[]{ "to" }); // <1> + AnalyzeRequest request = AnalyzeRequest.buildCustomAnalyzer("standard") // <2> + .addCharFilter("html_strip") // <3> + .addTokenFilter("lowercase") // <4> + 
.addTokenFilter(stopFilter) // <5> + .build("Some text to analyze"); // end::analyze-custom-request } { // tag::analyze-custom-normalizer-request - AnalyzeRequest request = new AnalyzeRequest(); - request.text("BaR"); - request.addTokenFilter("lowercase"); + AnalyzeRequest request = AnalyzeRequest.buildCustomNormalizer() + .addTokenFilter("lowercase") + .build("BaR"); // end::analyze-custom-normalizer-request // tag::analyze-request-explain @@ -2484,10 +2481,11 @@ public void testAnalyze() throws IOException, InterruptedException { { // tag::analyze-index-request - AnalyzeRequest request = new AnalyzeRequest(); - request.index("my_index"); // <1> - request.analyzer("my_analyzer"); // <2> - request.text("some text to analyze"); + AnalyzeRequest request = AnalyzeRequest.withIndexAnalyzer( + "my_index", // <1> + "my_analyzer", // <2> + "some text to analyze" + ); // end::analyze-index-request // tag::analyze-execute-listener @@ -2505,10 +2503,7 @@ public void onFailure(Exception e) { // end::analyze-execute-listener // use a built-in analyzer in the test - request = new AnalyzeRequest(); - request.index("my_index"); - request.field("my_field"); - request.text("some text to analyze"); + request = AnalyzeRequest.withField("my_index", "my_field", "some text to analyze"); // Use a blocking listener in the test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); @@ -2522,19 +2517,17 @@ public void onFailure(Exception e) { { // tag::analyze-index-normalizer-request - AnalyzeRequest request = new AnalyzeRequest(); - request.index("my_index"); // <1> - request.normalizer("my_normalizer"); // <2> - request.text("some text to analyze"); + AnalyzeRequest request = AnalyzeRequest.withNormalizer( + "my_index", // <1> + "my_normalizer", // <2> + "some text to analyze" + ); // end::analyze-index-normalizer-request } { // tag::analyze-field-request - AnalyzeRequest request = new AnalyzeRequest(); - request.index("my_index"); - request.field("my_field"); - request.text("some text to analyze"); + AnalyzeRequest request = AnalyzeRequest.withField("my_index", "my_field", "some text to analyze"); // end::analyze-field-request } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java index 51670b29de1b6..3530e63e47e1d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/QueryDSLDocumentationTests.java @@ -42,7 +42,6 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.boostingQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.disMaxQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; @@ -106,13 +105,6 @@ public void testBoosting() { // end::boosting } - public void testCommonTerms() { - // tag::common_terms - commonTermsQuery("name", // <1> - "kimchy"); // <2> - // end::common_terms - } - public void testConstantScore() { // tag::constant_score constantScoreQuery( diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeGlobalRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeGlobalRequestTests.java new file mode 100644 index 0000000000000..a18971d28fe2d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeGlobalRequestTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; + +public class AnalyzeGlobalRequestTests extends AnalyzeRequestTests { + + @Override + protected AnalyzeRequest createClientTestInstance() { + int option = random().nextInt(3); + switch (option) { + case 0: + return AnalyzeRequest.withGlobalAnalyzer("my_analyzer", "some text", "some more text"); + case 1: + return AnalyzeRequest.buildCustomAnalyzer("my_tokenizer") + .addCharFilter("my_char_filter") + .addCharFilter(Map.of("type", "html_strip")) + .addTokenFilter("my_token_filter") + .addTokenFilter(Map.of("type", "synonym")) + .build("some text", "some more text"); + case 2: + return AnalyzeRequest.buildCustomNormalizer() + .addCharFilter("my_char_filter") + .addCharFilter(Map.of("type", "html_strip")) + .addTokenFilter("my_token_filter") + .addTokenFilter(Map.of("type", "synonym")) + .build("some text", "some more text"); + } + throw new IllegalStateException("nextInt(3) has returned a value greater than 2"); + } + + @Override + protected AnalyzeAction.Request doParseToServerInstance(XContentParser parser) throws IOException { + return AnalyzeAction.Request.fromXContent(parser, null); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeIndexRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeIndexRequestTests.java new file mode 100644 index 0000000000000..7cf271e89ac48 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeIndexRequestTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; + +public class AnalyzeIndexRequestTests extends AnalyzeRequestTests { + + @Override + protected AnalyzeRequest createClientTestInstance() { + int option = random().nextInt(5); + switch (option) { + case 0: + return AnalyzeRequest.withField("index", "field", "some text", "some more text"); + case 1: + return AnalyzeRequest.withIndexAnalyzer("index", "my_analyzer", "some text", "some more text"); + case 2: + return AnalyzeRequest.withNormalizer("index", "my_normalizer", "text", "more text"); + case 3: + return AnalyzeRequest.buildCustomAnalyzer("index", "my_tokenizer") + .addCharFilter("my_char_filter") + .addCharFilter(Map.of("type", "html_strip")) + .addTokenFilter("my_token_filter") + .addTokenFilter(Map.of("type", "synonym")) + .build("some text", "some more text"); + case 4: + return AnalyzeRequest.buildCustomNormalizer("index") + .addCharFilter("my_char_filter") + .addCharFilter(Map.of("type", "html_strip")) + .addTokenFilter("my_token_filter") + .addTokenFilter(Map.of("type", "synonym")) + .build("some text", "some more text"); + } + throw new IllegalStateException("nextInt(5) has returned a value greater than 4"); + } + + @Override + protected AnalyzeAction.Request doParseToServerInstance(XContentParser parser) throws IOException { + return AnalyzeAction.Request.fromXContent(parser, "index"); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeRequestTests.java new file mode 100644 index 0000000000000..50a339fc8058a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeRequestTests.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.client.AbstractRequestTestCase; + +public abstract class AnalyzeRequestTests extends AbstractRequestTestCase { + + @Override + protected void assertInstances(AnalyzeAction.Request serverInstance, AnalyzeRequest clientTestInstance) { + assertEquals(serverInstance.index(), clientTestInstance.index()); + assertArrayEquals(serverInstance.text(), clientTestInstance.text()); + assertEquals(serverInstance.analyzer(), clientTestInstance.analyzer()); + assertEquals(serverInstance.normalizer(), clientTestInstance.normalizer()); + assertEquals(serverInstance.charFilters().size(), clientTestInstance.charFilters().size()); + for (int i = 0; i < serverInstance.charFilters().size(); i++) { + assertEquals(serverInstance.charFilters().get(i).name, clientTestInstance.charFilters().get(i).name); + assertEquals(serverInstance.charFilters().get(i).definition, clientTestInstance.charFilters().get(i).definition); + } + assertEquals(serverInstance.tokenFilters().size(), clientTestInstance.tokenFilters().size()); + for (int i = 0; i < serverInstance.tokenFilters().size(); i++) { + assertEquals(serverInstance.tokenFilters().get(i).name, clientTestInstance.tokenFilters().get(i).name); + assertEquals(serverInstance.tokenFilters().get(i).definition, clientTestInstance.tokenFilters().get(i).definition); + } + if (serverInstance.tokenizer() != null) { + assertEquals(serverInstance.tokenizer().name, clientTestInstance.tokenizer().name); + assertEquals(serverInstance.tokenizer().definition, clientTestInstance.tokenizer().definition); + } + else { + assertNull(clientTestInstance.tokenizer()); + } + assertEquals(serverInstance.field(), clientTestInstance.field()); + assertEquals(serverInstance.explain(), clientTestInstance.explain()); + assertArrayEquals(serverInstance.attributes(), clientTestInstance.attributes()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java new file mode 100644 index 0000000000000..e29fa88d7fe3e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/AnalyzeResponseTests.java @@ -0,0 +1,174 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; +import org.elasticsearch.client.AbstractResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class AnalyzeResponseTests extends AbstractResponseTestCase { + + @Override + protected AnalyzeAction.Response createServerTestInstance() { + int tokenCount = randomIntBetween(1, 30); + AnalyzeAction.AnalyzeToken[] tokens = new AnalyzeAction.AnalyzeToken[tokenCount]; + for (int i = 0; i < tokenCount; i++) { + tokens[i] = randomToken(); + } + if (randomBoolean()) { + AnalyzeAction.CharFilteredText[] charfilters = null; + AnalyzeAction.AnalyzeTokenList[] tokenfilters = null; + if (randomBoolean()) { + charfilters = new AnalyzeAction.CharFilteredText[]{ + new AnalyzeAction.CharFilteredText("my_charfilter", new String[]{"one two"}) + }; + } + if (randomBoolean()) { + tokenfilters = new AnalyzeAction.AnalyzeTokenList[]{ + new AnalyzeAction.AnalyzeTokenList("my_tokenfilter_1", tokens), + new AnalyzeAction.AnalyzeTokenList("my_tokenfilter_2", tokens) + }; + } + AnalyzeAction.DetailAnalyzeResponse dar = new AnalyzeAction.DetailAnalyzeResponse( + charfilters, + new AnalyzeAction.AnalyzeTokenList("my_tokenizer", tokens), + tokenfilters); + return new AnalyzeAction.Response(null, dar); + } + return new AnalyzeAction.Response(Arrays.asList(tokens), null); + } + + private AnalyzeAction.AnalyzeToken randomToken() { + String token = randomAlphaOfLengthBetween(1, 20); + int position = randomIntBetween(0, 1000); + int startOffset = randomIntBetween(0, 1000); + int endOffset = randomIntBetween(0, 1000); + int posLength = randomIntBetween(1, 5); + String type = randomAlphaOfLengthBetween(1, 20); + Map extras = new HashMap<>(); + if (randomBoolean()) { + int entryCount = randomInt(6); + for (int i = 0; i < entryCount; i++) { + switch (randomInt(6)) { + case 0: + case 1: + case 2: + case 3: + String key = randomAlphaOfLength(5); + String value = randomAlphaOfLength(10); + extras.put(key, value); + break; + case 4: + String objkey = randomAlphaOfLength(5); + Map obj = new HashMap<>(); + obj.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + extras.put(objkey, obj); + break; + case 5: + String listkey = randomAlphaOfLength(5); + List list = new ArrayList<>(); + list.add(randomAlphaOfLength(4)); + list.add(randomAlphaOfLength(6)); + extras.put(listkey, list); + break; + } + } + } + return new AnalyzeAction.AnalyzeToken(token, position, startOffset, endOffset, posLength, type, extras); + } + + @Override + protected AnalyzeResponse doParseToClientInstance(XContentParser parser) throws IOException { + return AnalyzeResponse.fromXContent(parser); + } + + @Override + protected void assertInstances(AnalyzeAction.Response serverTestInstance, AnalyzeResponse clientInstance) { + if (serverTestInstance.detail() != null) { + assertNotNull(clientInstance.detail()); + assertInstances(serverTestInstance.detail(), clientInstance.detail()); + } + else { + assertEquals(serverTestInstance.getTokens().size(), clientInstance.getTokens().size()); + for (int i = 0; i < serverTestInstance.getTokens().size(); i++) { + assertEqualTokens(serverTestInstance.getTokens().get(0), clientInstance.getTokens().get(0)); + } + } + } + + private static void assertEqualTokens(AnalyzeAction.AnalyzeToken serverToken, AnalyzeResponse.AnalyzeToken 
clientToken) { + assertEquals(serverToken.getTerm(), clientToken.getTerm()); + assertEquals(serverToken.getPosition(), clientToken.getPosition()); + assertEquals(serverToken.getPositionLength(), clientToken.getPositionLength()); + assertEquals(serverToken.getStartOffset(), clientToken.getStartOffset()); + assertEquals(serverToken.getEndOffset(), clientToken.getEndOffset()); + assertEquals(serverToken.getType(), clientToken.getType()); + assertEquals(serverToken.getAttributes(), clientToken.getAttributes()); + } + + private static void assertInstances(AnalyzeAction.DetailAnalyzeResponse serverResponse, DetailAnalyzeResponse clientResponse) { + assertInstances(serverResponse.analyzer(), clientResponse.analyzer()); + assertInstances(serverResponse.tokenizer(), clientResponse.tokenizer()); + if (serverResponse.tokenfilters() == null) { + assertNull(clientResponse.tokenfilters()); + } + else { + assertEquals(serverResponse.tokenfilters().length, clientResponse.tokenfilters().length); + for (int i = 0; i < serverResponse.tokenfilters().length; i++) { + assertInstances(serverResponse.tokenfilters()[i], clientResponse.tokenfilters()[i]); + } + } + if (serverResponse.charfilters() == null) { + assertNull(clientResponse.charfilters()); + } + else { + assertEquals(serverResponse.charfilters().length, clientResponse.charfilters().length); + for (int i = 0; i < serverResponse.charfilters().length; i++) { + assertInstances(serverResponse.charfilters()[i], clientResponse.charfilters()[i]); + } + } + } + + private static void assertInstances(AnalyzeAction.AnalyzeTokenList serverTokens, + DetailAnalyzeResponse.AnalyzeTokenList clientTokens) { + if (serverTokens == null) { + assertNull(clientTokens); + } + else { + assertEquals(serverTokens.getName(), clientTokens.getName()); + assertEquals(serverTokens.getTokens().length, clientTokens.getTokens().length); + for (int i = 0; i < serverTokens.getTokens().length; i++) { + assertEqualTokens(serverTokens.getTokens()[i], clientTokens.getTokens()[i]); + } + } + } + + private static void assertInstances(AnalyzeAction.CharFilteredText serverText, DetailAnalyzeResponse.CharFilteredText clientText) { + assertEquals(serverText.getName(), clientText.getName()); + assertArrayEquals(serverText.getTexts(), clientText.getTexts()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FindFileStructureRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FindFileStructureRequestTests.java index 4cb8bf0a7c166..752d0593bef95 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FindFileStructureRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FindFileStructureRequestTests.java @@ -35,6 +35,7 @@ public class FindFileStructureRequestTests extends AbstractXContentTestCase p.setTimeout(TimeValue.parseTimeValue(c, FindFileStructureRequest.TIMEOUT.getPreferredName())), FindFileStructureRequest.TIMEOUT); PARSER.declareString(FindFileStructureRequest::setCharset, FindFileStructureRequest.CHARSET); @@ -72,6 +73,9 @@ public static FindFileStructureRequest createTestRequestWithoutSample() { if (randomBoolean()) { findFileStructureRequest.setLinesToSample(randomIntBetween(1000, 2000)); } + if (randomBoolean()) { + findFileStructureRequest.setLineMergeSizeLimit(randomIntBetween(10000, 20000)); + } if (randomBoolean()) { findFileStructureRequest.setTimeout(TimeValue.timeValueSeconds(randomIntBetween(10, 20))); } diff --git a/client/rest/build.gradle 
b/client/rest/build.gradle index ee0317457118d..352b15699d081 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -44,7 +44,7 @@ dependencies { compile "commons-codec:commons-codec:${versions.commonscodec}" compile "commons-logging:commons-logging:${versions.commonslogging}" - testCompile "org.elasticsearch.client:test:${version}" + testCompile project(":client:test") testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" @@ -68,7 +68,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in :libs:core +// TODO: Not anymore. Now in :libs:elasticsearch-core jarHell.enabled=false testingConventions { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 2f9eeca6020c4..8b4d21537a109 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -35,14 +35,14 @@ publishing { } dependencies { - compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" + compile project(":client:rest") compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testCompile "org.elasticsearch.client:test:${version}" + testCompile project(":client:test") testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.elasticsearch:securemock:${versions.securemock}" @@ -68,7 +68,7 @@ dependencyLicenses { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in :libs:core +// TODO: Not anymore. Now in :libs:elasticsearch-core jarHell.enabled=false testingConventions { diff --git a/client/test/build.gradle b/client/test/build.gradle index 184606e360791..169172736ecd3 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -44,7 +44,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in :libs:core +// TODO: Not anymore. Now in :libs:elasticsearch-core jarHell.enabled=false // TODO: should we have licenses for our test deps? 
diff --git a/client/transport/build.gradle b/client/transport/build.gradle index c1e4503445bb7..36bd674018267 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -23,13 +23,13 @@ apply plugin: 'nebula.maven-scm' group = 'org.elasticsearch.client' dependencies { - compile "org.elasticsearch:elasticsearch:${version}" - compile "org.elasticsearch.plugin:transport-netty4-client:${version}" - compile "org.elasticsearch.plugin:reindex-client:${version}" - compile "org.elasticsearch.plugin:lang-mustache-client:${version}" - compile "org.elasticsearch.plugin:percolator-client:${version}" - compile "org.elasticsearch.plugin:parent-join-client:${version}" - compile "org.elasticsearch.plugin:rank-eval-client:${version}" + compile project(":server") + compile project(":modules:transport-netty4") + compile project(":modules:reindex") + compile project(":modules:lang-mustache") + compile project(":modules:percolator") + compile project(":modules:parent-join") + compile project(":modules:rank-eval") testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/distribution/build.gradle b/distribution/build.gradle index 9c98101da5be3..8e4b9de1b916a 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -241,7 +241,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { // delay by using closures, since they have not yet been configured, so no jar task exists yet from { project(':server').jar } from { project(':server').configurations.runtime } - from { project(':libs:plugin-classloader').jar } + from { project(':libs:elasticsearch-plugin-classloader').jar } from { project(':distribution:tools:java-version-checker').jar } from { project(':distribution:tools:launchers').jar } into('tools/plugin-cli') { diff --git a/distribution/tools/keystore-cli/build.gradle b/distribution/tools/keystore-cli/build.gradle index 5d57ef2e05597..de09e78e37493 100644 --- a/distribution/tools/keystore-cli/build.gradle +++ b/distribution/tools/keystore-cli/build.gradle @@ -20,9 +20,9 @@ apply plugin: 'elasticsearch.build' dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly "org.elasticsearch:elasticsearch-cli:${version}" - testCompile "org.elasticsearch.test:framework:${version}" + compileOnly project(":server") + compileOnly project(":libs:elasticsearch-cli") + testCompile project(":test:framework") testCompile 'com.google.jimfs:jimfs:1.1' testCompile 'com.google.guava:guava:18.0' } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 48bc899cd29b4..3db958c6ec41e 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -22,11 +22,11 @@ apply plugin: 'elasticsearch.build' archivesBaseName = 'elasticsearch-plugin-cli' dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" - compileOnly "org.elasticsearch:elasticsearch-cli:${version}" + compileOnly project(":server") + compileOnly project(":libs:elasticsearch-cli") compile "org.bouncycastle:bcpg-jdk15on:${versions.bouncycastle}" compile "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") testCompile 'com.google.jimfs:jimfs:1.1' testCompile 'com.google.guava:guava:18.0' } diff --git 
a/docs/java-rest/high-level/indices/analyze.asciidoc b/docs/java-rest/high-level/indices/analyze.asciidoc index 4978c9ebcca64..9464394fd1eb9 100644 --- a/docs/java-rest/high-level/indices/analyze.asciidoc +++ b/docs/java-rest/high-level/indices/analyze.asciidoc @@ -19,18 +19,18 @@ The simplest version uses a built-in analyzer: --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-builtin-request] --------------------------------------------------- -<1> The text to include. Multiple strings are treated as a multi-valued field -<2> A built-in analyzer +<1> A built-in analyzer +<2> The text to include. Multiple strings are treated as a multi-valued field You can configure a custom analyzer: ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-custom-request] --------------------------------------------------- -<1> Configure char filters +<1> Configuration for a custom tokenfilter <2> Configure the tokenizer -<3> Add a built-in tokenfilter -<4> Configuration for a custom tokenfilter +<3> Configure char filters +<4> Add a built-in tokenfilter <5> Add the custom tokenfilter You can also build a custom normalizer, by including only charfilters and diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 53d9b9af97d12..f845a8c32e602 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -23,7 +23,6 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-match-query-phrase.html[Match Phrase] | {query-ref}/MatchPhraseQueryBuilder.html[MatchPhraseQueryBuilder] | {query-ref}/QueryBuilders.html#matchPhraseQuery-java.lang.String-java.lang.Object-[QueryBuilders.matchPhraseQuery()] | {ref}/query-dsl-match-query-phrase-prefix.html[Match Phrase Prefix] | {query-ref}/MatchPhrasePrefixQueryBuilder.html[MatchPhrasePrefixQueryBuilder] | {query-ref}/QueryBuilders.html#matchPhrasePrefixQuery-java.lang.String-java.lang.Object-[QueryBuilders.matchPhrasePrefixQuery()] | {ref}/query-dsl-multi-match-query.html[Multi Match] | {query-ref}/MultiMatchQueryBuilder.html[MultiMatchQueryBuilder] | {query-ref}/QueryBuilders.html#multiMatchQuery-java.lang.Object-java.lang.String\…-[QueryBuilders.multiMatchQuery()] -| {ref}/query-dsl-common-terms-query.html[Common Terms] | {query-ref}/CommonTermsQueryBuilder.html[CommonTermsQueryBuilder] | {query-ref}/QueryBuilders.html#commonTermsQuery-java.lang.String-java.lang.Object-[QueryBuilders.commonTermsQuery()] | {ref}/query-dsl-query-string-query.html[Query String] | {query-ref}/QueryStringQueryBuilder.html[QueryStringQueryBuilder] | {query-ref}/QueryBuilders.html#queryStringQuery-java.lang.String-[QueryBuilders.queryStringQuery()] | {ref}/query-dsl-simple-query-string-query.html[Simple Query String] | {query-ref}/SimpleQueryStringBuilder.html[SimpleQueryStringBuilder] | {query-ref}/QueryBuilders.html#simpleQueryStringQuery-java.lang.String-[QueryBuilders.simpleQueryStringQuery()] |====== diff --git a/docs/painless/painless-guide/index.asciidoc b/docs/painless/painless-guide/index.asciidoc index b45406a4e7273..2243608ffb172 100644 --- a/docs/painless/painless-guide/index.asciidoc +++ b/docs/painless/painless-guide/index.asciidoc @@ -1,5 +1,7 @@ include::painless-walkthrough.asciidoc[] +include::painless-datetime.asciidoc[] + include::painless-method-dispatch.asciidoc[] 
include::painless-debugging.asciidoc[] diff --git a/docs/painless/painless-guide/painless-datetime.asciidoc b/docs/painless/painless-guide/painless-datetime.asciidoc new file mode 100644 index 0000000000000..ef8ef8fd8c853 --- /dev/null +++ b/docs/painless/painless-guide/painless-datetime.asciidoc @@ -0,0 +1,320 @@ +[[painless-datetime]] +=== Using Datetime in Painless + +==== Datetime API + +Datetimes in Painless use the standard Java libraries and are available through +the Painless <>. Most of the classes +from the following Java packages are available to use in Painless scripts: + +* <> +* <> +* <> +* <> +* <> + +==== Datetime Representation + +Datetimes in Painless are most commonly represented as a +<>, a <>, or a +<>. + +long:: represents a datetime as the number of milliseconds or nanoseconds since +epoch (1970-01-01T00:00:00Z) +String:: represents a datetime as a sequence of characters defined by a +well-known standard such as https://en.wikipedia.org/wiki/ISO_8601[ISO 8601] or +defined by the source of input in a custom way +ZonedDateTime:: a <> (object) that contains an +internal representation of a datetime and provides numerous +<> for +modification and comparison. + +Switching between different representations of datetimes is often necessary to +achieve a script's objective(s). A typical pattern in a script is to switch a +long or String representation of a datetime to a ZonedDateTime representation, +modify or compare the ZonedDateTime representation, and then switch it back to +a long or String representation for storage or as a returned result. + +==== Datetime Parsing and Formatting + +Datetime parsing is a switch from a String representation to a ZonedDateTime +representation, and datetime formatting is a switch from a ZonedDateTime +representation to a String representation. + +A <> is a +<> (object) that defines the allowed sequence +of characters for a String representation of a datetime. Datetime parsing and +formatting often requires a DateTimeFormatter. For more information about how +to use a DateTimeFormatter see the +{java11-javadoc}/java.base/java/time/format/DateTimeFormatter.html[Java documentation]. + +===== Datetime Parsing Examples + +* parse from milliseconds ++ +[source,Painless] +---- +String milliSinceEpochString = "434931330000"; +long milliSinceEpoch = Long.parseLong(milliSinceEpochString); +Instant instant = Instant.ofEpochMilli(milliSinceEpoch); +ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z')); +---- ++ +* parse from ISO 8601 ++ +[source,Painless] +---- +String datetime = '1983-10-13T22:15:30Z'; +ZonedDateTime zdt = ZonedDateTime.parse(datetime); +---- +Note the parse method uses ISO 8601 by default. ++ +* parse from RFC 1123 ++ +[source,Painless] +---- +String datetime = 'Thu, 13 Oct 1983 22:15:30 GMT'; +ZonedDateTime zdt = ZonedDateTime.parse(datetime, + DateTimeFormatter.RFC_1123_DATE_TIME); +---- +Note the use of a built-in DateTimeFormatter. ++ +* parse from a custom format ++ +[source,Painless] +---- +String datetime = 'custom y 1983 m 10 d 13 22:15:30 Z'; +DateTimeFormatter dtf = DateTimeFormatter.ofPattern( + "'custom' 'y' yyyy 'm' MM 'd' dd HH:mm:ss VV"); +ZonedDateTime zdt = ZonedDateTime.parse(datetime, dtf); +---- +Note the use of a custom DateTimeFormatter. 
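One further parsing case that comes up often is date-only input with no time or zone. The sketch below is illustrative only and is not part of the examples above; it assumes the missing time is midnight and the missing zone is UTC, and it relies on the standard `java.time.LocalDate` class being available through the Painless shared API as described earlier.

[source,Painless]
----
String datetime = '1983-10-13';                       // date only, no time or zone
LocalDate ld = LocalDate.parse(datetime);             // ISO local date format by default
ZonedDateTime zdt = ld.atStartOfDay(ZoneId.of('Z'));  // assume midnight, UTC
----
Keeping the assumed time zone explicit, rather than relying on a default, makes the resulting datetime reproducible wherever the script runs.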
+ +===== Datetime Formatting Examples + +* format to a String (ISO 8601) ++ +[source,Painless] +---- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +String datetime = zdt.format(DateTimeFormatter.ISO_INSTANT); +---- +Note the use of a built-in DateTimeFormatter. ++ +* format to a String (custom) ++ +[source,Painless] +---- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +DateTimeFormatter dtf = DateTimeFormatter.ofPattern( + "'date:' yyyy/MM/dd 'time:' HH:mm:ss"); +String datetime = zdt.format(dtf); +---- +Note the use of a custom DateTimeFormatter. + +==== Datetime Conversion + +Datetime conversion is a switch from a long representation to a ZonedDateTime +representation and vice versa. + +===== Datetime Conversion Examples + +* convert from milliseconds ++ +[source,Painless] +---- +long milliSinceEpoch = 434931330000L; +Instant instant = Instant.ofEpochMilli(milliSinceEpoch); +ZonedDateTime zdt = ZonedDateTime.ofInstant(instant, ZoneId.of('Z')); +---- ++ +* convert to milliseconds ++ +[source,Painless] +----- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +long milliSinceEpoch = zdt.toInstant().toEpochMilli(); +----- + +==== Datetime Pieces + +Use the ZonedDateTime +<> to create a new +ZonedDateTime from pieces (year, month, day, hour, minute, second, nano, +time zone). Use ZonedDateTime +<> to extract pieces from +a ZonedDateTime. + +===== Datetime Pieces Examples + +* create a ZonedDateTime from pieces ++ +[source,Painless] +---- +int year = 1983; +int month = 10; +int day = 13; +int hour = 22; +int minutes = 15; +int seconds = 30; +int nanos = 0; +ZonedDateTime zdt = ZonedDateTime.of( + year, month, day, hour, minutes, seconds, nanos, ZoneId.of('Z')); +---- ++ +* extract pieces from a ZonedDateTime ++ +[source,Painless] +---- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 100, ZoneId.of(tz)); +int year = zdt.getYear(); +int month = zdt.getMonthValue(); +int day = zdt.getDayOfMonth(); +int hour = zdt.getHour(); +int minutes = zdt.getMinute(); +int seconds = zdt.getSecond(); +int nanos = zdt.getNano(); +---- + +==== Datetime Modification + +Use either a long or a ZonedDateTime to do datetime modification such as adding +several seconds to a datetime or subtracting several days from a datetime. Use +standard <> to modify a long +representation of a datetime. Use ZonedDateTime +<> to modify a +ZonedDateTime representation of a datetime. Note most modification methods for +a ZonedDateTime return a new instance for assignment or immediate use. 
+ +===== Datetime Modification Examples + +* Subtract three seconds from milliseconds ++ +[source,Painless] +---- +long milliSinceEpoch = 434931330000L; +milliSinceEpoch = milliSinceEpoch - 1000L*3L; +---- ++ +* Add three days to a datetime ++ +[source,Painless] +---- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime updatedZdt = zdt.plusDays(3); +---- ++ +* Subtract 125 minutes from a datetime ++ +[source,Painless] +---- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime updatedZdt = zdt.minusMinutes(125); +---- ++ +* Set the year on a datetime ++ +[source,Painless] +---- +ZonedDateTime zdt = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime updatedZdt = zdt.withYear(1976); +---- + +==== Elapsed Time + +Use either two longs or two ZonedDateTimes to calculate an elapsed +time (difference) between two datetimes. Use +<> to calculate an elapsed time +between two longs of the same time unit such as milliseconds. For more complex +datetimes. use <> to +calculate the difference between two ZonedDateTimes. + +===== Elapsed Time Examples + +* Elapsed time for two millisecond datetimes ++ +[source,Painless] +---- +long startTimestamp = 434931327000L; +long endTimestamp = 434931330000L; +long differenceInMillis = endTimestamp - startTimestamp; +---- ++ +* Elapsed time in milliseconds for two datetimes ++ +[source,Painless] +---- +ZonedDateTime zdt1 = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 11000000, ZoneId.of('Z')); +ZonedDateTime zdt2 = + ZonedDateTime.of(1983, 10, 13, 22, 15, 35, 0, ZoneId.of('Z')); +long differenceInMillis = ChronoUnit.MILLIS.between(zdt1, zdt2); +---- ++ +* Elapsed time in days for two datetimes ++ +[source,Painless] +---- +ZonedDateTime zdt1 = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 11000000, ZoneId.of('Z')); +ZonedDateTime zdt2 = + ZonedDateTime.of(1983, 10, 17, 22, 15, 35, 0, ZoneId.of('Z')); +long differenceInDays = ChronoUnit.DAYS.between(zdt1, zdt2); +---- + +==== Datetime Comparison + +Use either two longs or two ZonedDateTimes to do a datetime comparison. Use +standard <> to compare two +longs of the same time unit such as milliseconds. For more complex datetimes, +use ZonedDateTime <> to +compare two ZonedDateTimes. 
+ +===== Datetime Comparison Examples + +* Comparison of two millisecond datetimes ++ +[source,Painless] +---- +long timestamp1 = 434931327000L; +long timestamp2 = 434931330000L; + +if (timestamp1 > timestamp2) { + // handle condition +} +---- ++ +* Before comparison of two datetimes ++ +[source,Painless] +---- +ZonedDateTime zdt1 = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime zdt2 = + ZonedDateTime.of(1983, 10, 17, 22, 15, 35, 0, ZoneId.of('Z')); + +if (zdt1.isBefore(zdt2)) { + // handle condition +} +---- ++ +* After comparison of two datetimes ++ +[source,Painless] +---- +ZonedDateTime zdt1 = + ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z')); +ZonedDateTime zdt2 = + ZonedDateTime.of(1983, 10, 17, 22, 15, 35, 0, ZoneId.of('Z')); + +if (zdt1.isAfter(zdt2)) { + // handle condition +} +---- diff --git a/docs/plugins/analysis-stempel.asciidoc b/docs/plugins/analysis-stempel.asciidoc index a5526129a97d3..cd234c8d42756 100644 --- a/docs/plugins/analysis-stempel.asciidoc +++ b/docs/plugins/analysis-stempel.asciidoc @@ -12,7 +12,107 @@ include::install_remove.asciidoc[] [[analysis-stempel-tokenizer]] [float] -==== `stempel` tokenizer and token filter +==== `stempel` tokenizer and token filters -The plugin provides the `polish` analyzer and `polish_stem` token filter, +The plugin provides the `polish` analyzer and the `polish_stem` and `polish_stop` token filters, which are not configurable. + +==== Reimplementing and extending the analyzers + +The `polish` analyzer could be reimplemented as a `custom` analyzer that can +then be extended and configured differently as follows: + +[source,js] +---------------------------------------------------- +PUT /stempel_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_stempel": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "polish_stop", + "polish_stem" + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stempel_example, first: polish, second: rebuilt_stempel}\nendyaml\n/] + +[[analysis-polish-stop]] +==== `polish_stop` token filter + +The `polish_stop` token filter filters out Polish stopwords (`_polish_`), and +any other custom stopwords specified by the user. This filter only supports +the predefined `_polish_` stopwords list. If you want to use a different +predefined list, then use the +{ref}/analysis-stop-tokenfilter.html[`stop` token filter] instead. + +[source,js] +-------------------------------------------------- +PUT /polish_stop_example +{ + "settings": { + "index": { + "analysis": { + "analyzer": { + "analyzer_with_stop": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "polish_stop" + ] + } + }, + "filter": { + "polish_stop": { + "type": "polish_stop", + "stopwords": [ + "_polish_", + "jeść" + ] + } + } + } + } + } +} + +GET polish_stop_example/_analyze +{ + "analyzer": "analyzer_with_stop", + "text": "Gdzie kucharek sześć, tam nie ma co jeść."
+} +-------------------------------------------------- +// CONSOLE + +The above request returns: + +[source,js] +-------------------------------------------------- +{ + "tokens" : [ + { + "token" : "kucharek", + "start_offset" : 6, + "end_offset" : 14, + "type" : "", + "position" : 1 + }, + { + "token" : "sześć", + "start_offset" : 15, + "end_offset" : 20, + "type" : "", + "position" : 2 + } + ] +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 0b7d9ce403466..b46bd21a32bb1 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -86,6 +86,10 @@ Where: `date_format`:: is the optional format in which the computed date should be rendered. Defaults to `yyyy.MM.dd`. Format should be compatible with java-time https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html `time_zone`:: is the optional time zone. Defaults to `utc`. +NOTE: Pay attention to the usage of small vs capital letters used in the `date_format`. For example: +`mm` denotes minute of hour, while `MM` denotes month of year. Similarly `hh` denotes the hour in the +`1-12` range in combination with `AM/PM`, while `HH` denotes the hour in the `0-23` 24-hour range. + Date math expressions are resolved locale-independent. Consequently, it is not possible to use any other calendars than the Gregorian calendar. diff --git a/docs/reference/data-frames/apis/pivotresource.asciidoc b/docs/reference/data-frames/apis/pivotresource.asciidoc new file mode 100644 index 0000000000000..64c49af03ea77 --- /dev/null +++ b/docs/reference/data-frames/apis/pivotresource.asciidoc @@ -0,0 +1,26 @@ +[role="xpack"] +[testenv="basic"] +[[data-frame-transform-pivot]] +=== Pivot resources + +A pivot configuration object has the following properties: + +`group_by` (required):: (object) Defines how to group the data. More than one grouping can be defined per pivot. The following groupings are supported: +* {ref}/search-aggregations-bucket-composite-aggregation.html#_terms[Terms] +* {ref}/search-aggregations-bucket-composite-aggregation.html#_histogram[Histogram] +* {ref}/search-aggregations-bucket-composite-aggregation.html#_date_histogram[Date Histogram] + +`aggregations` (required):: (object) Defines how to aggregate the grouped data. +The following aggregations are supported: +* {ref}/search-aggregations-metrics-avg-aggregation.html[Average] +* {ref}/search-aggregations-metrics-weight-avg-aggregation.html[Weighted Average] +* {ref}/search-aggregations-metrics-cardinality-aggregation.html[Cardinality] +* {ref}/search-aggregations-metrics-geocentroid-aggregation.html[Geo Centroid] +* {ref}/search-aggregations-metrics-max-aggregation.html[Max] +* {ref}/search-aggregations-metrics-min-aggregation.html[Min] +* {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Scripted Metric] +* {ref}/search-aggregations-metrics-sum-aggregation.html[Sum] +* {ref}/search-aggregations-metrics-valuecount-aggregation.html[Value Count] +* {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Bucket Script] + +//For more information, see {stack-ov}/ml-dataframes.html[dataframes-cap}]. 
\ No newline at end of file diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/data-frames/apis/put-transform.asciidoc index f452c38ab4c94..fcc86fa3237d7 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/data-frames/apis/put-transform.asciidoc @@ -39,7 +39,7 @@ a `query`. `dest` (required):: (object) The destination configuration, consisting of `index`. `pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to -reduce the data. +reduce the data. See <>. `description`:: Optional free text description of the data frame transform diff --git a/docs/reference/ingest/processors/date-index-name.asciidoc b/docs/reference/ingest/processors/date-index-name.asciidoc index fa749fd342056..783ecc9b2b1b0 100644 --- a/docs/reference/ingest/processors/date-index-name.asciidoc +++ b/docs/reference/ingest/processors/date-index-name.asciidoc @@ -140,6 +140,6 @@ understands this to mean `2016-04-01` as is explained in the <>. +| `index_name_format` | no | yyyy-MM-dd | The format to be used when printing the parsed date into the index name. A valid java time pattern is expected here. Supports <>. include::common-options.asciidoc[] |====== diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index de0f3f2a5f1cd..63bb4591369e5 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -159,6 +159,22 @@ Nested documents can be: * sorted with <>. * retrieved and highlighted with <>. +[IMPORTANT] +============================================= + +Because nested documents are indexed as separate documents, they can only be +accessed within the scope of the `nested` query, the +`nested`/`reverse_nested` aggregations, or <>. + +For instance, if a string field within a nested document has +<> set to `offsets` to allow use of the postings +during the highlighting, these offsets will not be available during the main highlighting +phase. Instead, highlighting needs to be performed via +<>. The same consideration applies when loading +fields during a search through <> +or <>. + +============================================= [[nested-params]] ==== Parameters for `nested` fields @@ -178,21 +194,6 @@ The following parameters are accepted by `nested` fields: may be added to an existing nested object. -[IMPORTANT] -============================================= - -Because nested documents are indexed as separate documents, they can only be -accessed within the scope of the `nested` query, the -`nested`/`reverse_nested` aggregations, or <>. - -For instance, if a string field within a nested document has -<> set to `offsets` to allow use of the postings -during the highlighting, these offsets will not be available during the main highlighting -phase. Instead, highlighting needs to be performed via -<>. 
- -============================================= - [float] === Limits on `nested` mappings and objects diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index b697abf9a9f25..ff3f5030ed9fb 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -15,6 +15,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> * <> * <> @@ -24,6 +25,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> //NOTE: The notable-breaking-changes tagged regions are re-used in the @@ -51,6 +53,7 @@ include::migrate_8_0/analysis.asciidoc[] include::migrate_8_0/discovery.asciidoc[] include::migrate_8_0/mappings.asciidoc[] include::migrate_8_0/packaging.asciidoc[] +include::migrate_8_0/rollup.asciidoc[] include::migrate_8_0/snapshots.asciidoc[] include::migrate_8_0/security.asciidoc[] include::migrate_8_0/ilm.asciidoc[] @@ -60,4 +63,5 @@ include::migrate_8_0/node.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/http.asciidoc[] include::migrate_8_0/reindex.asciidoc[] +include::migrate_8_0/search.asciidoc[] include::migrate_8_0/settings.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/rollup.asciidoc b/docs/reference/migration/migrate_8_0/rollup.asciidoc new file mode 100644 index 0000000000000..71c9b38d444d6 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/rollup.asciidoc @@ -0,0 +1,20 @@ +[float] +[[breaking_80_rollup_changes]] +=== Rollup changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[float] +==== StartRollupJob endpoint returns success if job already started + +Previously, attempting to start an already-started rollup job would +result in a `500 InternalServerError: Cannot start task for Rollup Job +[job] because state was [STARTED]` exception. + +Now, attempting to start a job that is already started will just +return a successful `200 OK: started` response. \ No newline at end of file diff --git a/docs/reference/migration/migrate_8_0/search.asciidoc b/docs/reference/migration/migrate_8_0/search.asciidoc index 82886d35bc6a5..6fba2970f593e 100644 --- a/docs/reference/migration/migrate_8_0/search.asciidoc +++ b/docs/reference/migration/migrate_8_0/search.asciidoc @@ -7,4 +7,16 @@ The `/{index}/{type}/_search`, `/{index}/{type}/_msearch`, `/{index}/{type}/_search/template` and `/{index}/{type}/_msearch/template` REST endpoints have been removed in favour of `/{index}/_search`, `/{index}/_msearch`, `/{index}/_search/template` and `/{index}/_msearch/template`, since indexes no longer contain types, these typed endpoints are obsolete.. -The `/{index}/{type}/_termvectors`, `/{index}/{type}/{id}/_termvectors` and `/{index}/{type}/_mtermvectors` REST endpoints have been removed in favour of `/{index}/_termvectors`, `/{index}/{id}/_termvectors` and `/{index}/_mtermvectors`, since indexes no longer contain types, these typed endpoints are obsolete.. \ No newline at end of file +The `/{index}/{type}/_termvectors`, `/{index}/{type}/{id}/_termvectors` and `/{index}/{type}/_mtermvectors` REST endpoints have been removed in favour of `/{index}/_termvectors`, `/{index}/{id}/_termvectors` and `/{index}/_mtermvectors`, since indexes no longer contain types, these typed endpoints are obsolete.. + +[float] +==== Removal of queries + +The `common` query was deprecated in 7.x and has been removed in 8.0. 
+The same functionality can be achieved by the `match` query if the total number of hits is not tracked. + +[float] +===== Removal of query parameters + +The `cutoff_frequency` parameter was deprecated in 7.x and has been removed in 8.0 from `match` and `multi_match` queries. +The same functionality can be achieved without any configuration provided that the total number of hits is not tracked. diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index e9d9da479c0f2..b0835ff9466bb 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -92,6 +92,13 @@ chosen. parameter is not specified, the structure finder guesses based on the similarity of the first row of the file to other rows. +`line_merge_size_limit`:: + (unsigned integer) The maximum number of characters in a message when lines are + merged to form messages while analyzing semi-structured files. The default + is 10000. If you have extremely long messages you may need to increase this, but + be aware that this may lead to very long processing times if the way to group + lines into messages is misdetected. + `lines_to_sample`:: (unsigned integer) The number of lines to include in the structural analysis, starting from the beginning of the file. The minimum is 2; the default diff --git a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc index ccc0e99125371..2a62bb5e49dd3 100644 --- a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc +++ b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc @@ -35,30 +35,36 @@ four of the nodes at once: to do so would leave only three nodes remaining, which is less than half of the voting configuration, which means the cluster cannot take any further actions. +More precisely, if you shut down half or more of the master-eligible nodes all +at the same time then the cluster will normally become unavailable. If this +happens then you can bring the cluster back online by starting the removed +nodes again. + As long as there are at least three master-eligible nodes in the cluster, as a general rule it is best to remove nodes one-at-a-time, allowing enough time for the cluster to <> the voting configuration and adapt the fault tolerance level to the new set of nodes. If there are only two master-eligible nodes remaining then neither node can be -safely removed since both are required to reliably make progress. You must first -inform Elasticsearch that one of the nodes should not be part of the voting -configuration, and that the voting power should instead be given to other nodes. -You can then take the excluded node offline without preventing the other node -from making progress. A node which is added to a voting configuration exclusion -list still works normally, but Elasticsearch tries to remove it from the voting -configuration so its vote is no longer required. Importantly, Elasticsearch -will never automatically move a node on the voting exclusions list back into the -voting configuration. Once an excluded node has been successfully +safely removed since both are required to reliably make progress. To remove one +of these nodes you must first inform {es} that it should not be part of the +voting configuration, and that the voting power should instead be given to the +other node. 
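Picking up the search migration notes above, the following is a hedged sketch of the suggested replacement for the removed `common` query and `cutoff_frequency` option: a plain `match` query with hit tracking disabled, which lets Elasticsearch skip blocks of non-competitive documents without any extra configuration. The `body` field and the query text mirror the removed examples and are hypothetical:

[source,js]
--------------------------------------------------
# "body" is a hypothetical text field
GET /_search
{
  "track_total_hits": false,
  "query": {
    "match": {
      "body": "how not to be"
    }
  }
}
--------------------------------------------------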
You can then take the excluded node offline without preventing the +other node from making progress. A node which is added to a voting +configuration exclusion list still works normally, but {es} tries to remove it +from the voting configuration so its vote is no longer required. Importantly, +{es} will never automatically move a node on the voting exclusions list back +into the voting configuration. Once an excluded node has been successfully auto-reconfigured out of the voting configuration, it is safe to shut it down without affecting the cluster's master-level availability. A node can be added -to the voting configuration exclusion list using the <> API. For example: +to the voting configuration exclusion list using the +<> API. For example: [source,js] -------------------------------------------------- -# Add node to voting configuration exclusions list and wait for the system to -# auto-reconfigure the node out of the voting configuration up to the default -# timeout of 30 seconds +# Add node to voting configuration exclusions list and wait for the system +# to auto-reconfigure the node out of the voting configuration up to the +# default timeout of 30 seconds POST /_cluster/voting_config_exclusions/node_name # Add node to voting configuration exclusions list and wait for diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc index 2b17af17ec5da..cc7cb0ea91227 100644 --- a/docs/reference/modules/discovery/bootstrapping.asciidoc +++ b/docs/reference/modules/discovery/bootstrapping.asciidoc @@ -6,8 +6,9 @@ set of <> to be explicitly defined on one or more of the master-eligible nodes in the cluster. This is known as _cluster bootstrapping_. This is only required the very first time the cluster starts up: nodes that have already joined a cluster store this information in their -data folder and freshly-started nodes that are joining an existing cluster -obtain this information from the cluster's elected master. +data folder for use in a <>, and +freshly-started nodes that are joining a running cluster obtain this +information from the cluster's elected master. The initial set of master-eligible nodes is defined in the <>. This should be @@ -58,19 +59,6 @@ cluster.initial_master_nodes: - master-c -------------------------------------------------- -If it is not possible to use the names of the nodes then you can also use IP -addresses, or IP addresses and ports, or even a mix of IP addresses and node -names: - -[source,yaml] --------------------------------------------------- -cluster.initial_master_nodes: - - 10.0.10.101 - - 10.0.10.102:9300 - - 10.0.10.102:9301 - - master-node-name --------------------------------------------------- - Like all node settings, it is also possible to specify the initial set of master nodes on the command-line that is used to start Elasticsearch: @@ -139,3 +127,29 @@ in the <>: * `discovery.seed_providers` * `discovery.seed_hosts` * `cluster.initial_master_nodes` + +[NOTE] +================================================== + +[[modules-discovery-bootstrap-cluster-joining]] If you start an {es} node +without configuring these settings then it will start up in development mode and +auto-bootstrap itself into a new cluster. If you start some {es} nodes on +different hosts then by default they will not discover each other and will form +a different cluster on each host. 
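The note on accidentally forming separate clusters continues below; as a small aid, here is a hedged sketch of the check it refers to, which compares the `cluster_uuid` value reported by each node:

[source,js]
--------------------------------------------------
# Run against each node and compare the "cluster_uuid" values in the responses
GET /
--------------------------------------------------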
{es} will not merge separate clusters together +after they have formed, even if you subsequently try and configure all the nodes +into a single cluster. This is because there is no way to merge these separate +clusters together without a risk of data loss. You can tell that you have formed +separate clusters by checking the cluster UUID reported by `GET /` on each node. +If you intended to form a single cluster then you should start again: + +* Take a <> of each of the single-host clusters if + you do not want to lose any data that they hold. Note that each cluster must + use its own snapshot repository. +* Shut down all the nodes. +* Completely wipe each node by deleting the contents of their + <>. +* Configure `cluster.initial_master_nodes` as described above. +* Restart all the nodes and verify that they have formed a single cluster. +* <> any snapshots as required. + +================================================== diff --git a/docs/reference/query-dsl/common-terms-query.asciidoc b/docs/reference/query-dsl/common-terms-query.asciidoc deleted file mode 100644 index f2d784eb0c4c9..0000000000000 --- a/docs/reference/query-dsl/common-terms-query.asciidoc +++ /dev/null @@ -1,306 +0,0 @@ -[[query-dsl-common-terms-query]] -=== Common Terms Query - -deprecated[7.3.0,"Use <> instead, which skips blocks of documents efficiently, without any configuration, provided that the total number of hits is not tracked."] - -The `common` terms query is a modern alternative to stopwords which -improves the precision and recall of search results (by taking stopwords -into account), without sacrificing performance. - -[float] -==== The problem - -Every term in a query has a cost. A search for `"The brown fox"` -requires three term queries, one for each of `"the"`, `"brown"` and -`"fox"`, all of which are executed against all documents in the index. -The query for `"the"` is likely to match many documents and thus has a -much smaller impact on relevance than the other two terms. - -Previously, the solution to this problem was to ignore terms with high -frequency. By treating `"the"` as a _stopword_, we reduce the index size -and reduce the number of term queries that need to be executed. - -The problem with this approach is that, while stopwords have a small -impact on relevance, they are still important. If we remove stopwords, -we lose precision, (eg we are unable to distinguish between `"happy"` -and `"not happy"`) and we lose recall (eg text like `"The The"` or -`"To be or not to be"` would simply not exist in the index). - -[float] -==== The solution - -The `common` terms query divides the query terms into two groups: more -important (ie _low frequency_ terms) and less important (ie _high -frequency_ terms which would previously have been stopwords). - -First it searches for documents which match the more important terms. -These are the terms which appear in fewer documents and have a greater -impact on relevance. - -Then, it executes a second query for the less important terms -- terms -which appear frequently and have a low impact on relevance. But instead -of calculating the relevance score for *all* matching documents, it only -calculates the `_score` for documents already matched by the first -query. In this way the high frequency terms can improve the relevance -calculation without paying the cost of poor performance. - -If a query consists only of high frequency terms, then a single query is -executed as an `AND` (conjunction) query, in other words all terms are -required. 
Even though each individual term will match many documents, -the combination of terms narrows down the resultset to only the most -relevant. The single query can also be executed as an `OR` with a -specific -<>, -in this case a high enough value should probably be used. - -Terms are allocated to the high or low frequency groups based on the -`cutoff_frequency`, which can be specified as an absolute frequency -(`>=1`) or as a relative frequency (`0.0 .. 1.0`). (Remember that document -frequencies are computed on a per shard level as explained in the blog post -{defguide}/relevance-is-broken.html[Relevance is broken].) - -Perhaps the most interesting property of this query is that it adapts to -domain specific stopwords automatically. For example, on a video hosting -site, common terms like `"clip"` or `"video"` will automatically behave -as stopwords without the need to maintain a manual list. - -[float] -==== Examples - -In this example, words that have a document frequency greater than 0.1% -(eg `"this"` and `"is"`) will be treated as _common terms_. - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "this is bonsai cool", - "cutoff_frequency": 0.001 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -The number of terms which should match can be controlled with the -<> -(`high_freq`, `low_freq`), `low_freq_operator` (default `"or"`) and -`high_freq_operator` (default `"or"`) parameters. - -For low frequency terms, set the `low_freq_operator` to `"and"` to make -all terms required: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant as a cartoon", - "cutoff_frequency": 0.001, - "low_freq_operator": "and" - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "must": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ] - } - } -} --------------------------------------------------- -// CONSOLE - -Alternatively use -<> -to specify a minimum number or percentage of low frequency terms which -must be present, for instance: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant as a cartoon", - "cutoff_frequency": 0.001, - "minimum_should_match": 2 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "must": { - "bool": { - "should": [ - { "term": { "body": "nelly"}}, - { "term": { "body": 
"elephant"}}, - { "term": { "body": "cartoon"}} - ], - "minimum_should_match": 2 - } - }, - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ] - } - } -} --------------------------------------------------- -// CONSOLE - -A different -<> -can be applied for low and high frequency terms with the additional -`low_freq` and `high_freq` parameters. Here is an example when providing -additional parameters (note the change in structure): - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "nelly the elephant not as a cartoon", - "cutoff_frequency": 0.001, - "minimum_should_match": { - "low_freq" : 2, - "high_freq" : 3 - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "must": { - "bool": { - "should": [ - { "term": { "body": "nelly"}}, - { "term": { "body": "elephant"}}, - { "term": { "body": "cartoon"}} - ], - "minimum_should_match": 2 - } - }, - "should": { - "bool": { - "should": [ - { "term": { "body": "the"}}, - { "term": { "body": "not"}}, - { "term": { "body": "as"}}, - { "term": { "body": "a"}} - ], - "minimum_should_match": 3 - } - } - } - } -} --------------------------------------------------- -// CONSOLE - -In this case it means the high frequency terms have only an impact on -relevance when there are at least three of them. But the most -interesting use of the -<> -for high frequency terms is when there are only high frequency terms: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "common": { - "body": { - "query": "how not to be", - "cutoff_frequency": 0.001, - "minimum_should_match": { - "low_freq" : 2, - "high_freq" : 3 - } - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] - -which is roughly equivalent to: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "bool": { - "should": [ - { "term": { "body": "how"}}, - { "term": { "body": "not"}}, - { "term": { "body": "to"}}, - { "term": { "body": "be"}} - ], - "minimum_should_match": "3<50%" - } - } -} --------------------------------------------------- -// CONSOLE - -The high frequency generated query is then slightly less restrictive -than with an `AND`. - -The `common` terms query also supports `boost` and `analyzer` as -parameters. diff --git a/docs/reference/query-dsl/full-text-queries.asciidoc b/docs/reference/query-dsl/full-text-queries.asciidoc index 0af99b61f194f..8fc53bc7e9b8a 100644 --- a/docs/reference/query-dsl/full-text-queries.asciidoc +++ b/docs/reference/query-dsl/full-text-queries.asciidoc @@ -29,10 +29,6 @@ The queries in this group are: The multi-field version of the `match` query. -<>:: - - A more specialized query which gives more preference to uncommon words. 
- <>:: Supports the compact Lucene <>, @@ -59,8 +55,6 @@ include::match-bool-prefix-query.asciidoc[] include::multi-match-query.asciidoc[] -include::common-terms-query.asciidoc[] - include::query-string-query.asciidoc[] include::simple-query-string-query.asciidoc[] diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 14fc155cfccae..4b998d82cda24 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -119,53 +119,6 @@ GET /_search -------------------------------------------------- // CONSOLE -[[query-dsl-match-query-cutoff]] -===== Cutoff frequency - -deprecated[7.3.0,"This option can be omitted as the <> can skip block of documents efficiently, without any configuration, provided that the total number of hits is not tracked."] - -The match query supports a `cutoff_frequency` that allows -specifying an absolute or relative document frequency where high -frequency terms are moved into an optional subquery and are only scored -if one of the low frequency (below the cutoff) terms in the case of an -`or` operator or all of the low frequency terms in the case of an `and` -operator match. - -This query allows handling `stopwords` dynamically at runtime, is domain -independent and doesn't require a stopword file. It prevents scoring / -iterating high frequency terms and only takes the terms into account if a -more significant / lower frequency term matches a document. Yet, if all -of the query terms are above the given `cutoff_frequency` the query is -automatically transformed into a pure conjunction (`and`) query to -ensure fast execution. - -The `cutoff_frequency` can either be relative to the total number of -documents if in the range `[0..1)` or absolute if greater or equal to -`1.0`. - -Here is an example showing a query composed of stopwords exclusively: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query": { - "match" : { - "message" : { - "query" : "to be or not to be", - "cutoff_frequency" : 0.001 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[warning:Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]] - -IMPORTANT: The `cutoff_frequency` option operates on a per-shard-level. This means -that when trying it out on test indexes with low document numbers you -should follow the advice in {defguide}/relevance-is-broken.html[Relevance is broken]. - [[query-dsl-match-query-synonyms]] ===== Synonyms diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 9f574ed814d3c..e7bcd799df702 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -151,8 +151,8 @@ follows: Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, `fuzziness`, `lenient`, `prefix_length`, `max_expansions`, `rewrite`, `zero_terms_query`, - `cutoff_frequency`, `auto_generate_synonyms_phrase_query` and `fuzzy_transpositions`, - as explained in <>. +`auto_generate_synonyms_phrase_query` and `fuzzy_transpositions`, +as explained in <>. [IMPORTANT] [[operator-min]] @@ -247,9 +247,7 @@ The score from each `match` clause is added together, then divided by the number of `match` clauses. 
Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, -`fuzziness`, `lenient`, `prefix_length`, `max_expansions`, `rewrite`, `zero_terms_query` -and `cutoff_frequency`, as explained in <>, but -*see <>*. +`fuzziness`, `lenient`, `prefix_length`, `max_expansions`, `rewrite`, and `zero_terms_query`. [[type-phrase]] ==== `phrase` and `phrase_prefix` @@ -292,9 +290,9 @@ GET /_search -------------------------------------------------- // CONSOLE -Also, accepts `analyzer`, `boost`, `lenient`, `slop` and `zero_terms_query` as explained -in <>. Type `phrase_prefix` additionally accepts -`max_expansions`. +Also, accepts `analyzer`, <>, `lenient` and `zero_terms_query` as explained +in <>, as well as `slop` which is explained in <>. +Type `phrase_prefix` additionally accepts `max_expansions`. [IMPORTANT] [[phrase-fuzziness]] @@ -389,8 +387,7 @@ explanation: +blended("smith", fields: [first_name, last_name]) Also, accepts `analyzer`, `boost`, `operator`, `minimum_should_match`, -`lenient`, `zero_terms_query` and `cutoff_frequency`, as explained in -<>. +`lenient` and `zero_terms_query`. [[cross-field-analysis]] ===== `cross_field` and analysis @@ -554,5 +551,4 @@ explained in <> are supported. The construct term queries, but do not have an effect on the prefix query constructed from the final term. -The `slop` and `cutoff_frequency` parameters are not supported by this query -type. +The `slop` parameter is not supported by this query type. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 077d4f15f84c7..579378b5e28fd 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -609,3 +609,10 @@ The `TransportClient` is deprecated in favour of the Elasticsearch 8.0. The {java-rest}/java-rest-high-level-migration.html[migration guide] describes all the steps needed to migrate. + +[role="exclude",id="query-dsl-common-terms-query"] +=== Common Terms Query + +The `common` terms query is deprecated. Use the <> instead. The `match` query skips blocks of documents efficiently, +without any configuration, if the total number of hits is not tracked. \ No newline at end of file diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc index 823b63cbe579d..65a0384a3d3bf 100644 --- a/docs/reference/rest-api/defs.asciidoc +++ b/docs/reference/rest-api/defs.asciidoc @@ -12,6 +12,7 @@ These resource definitions are used in APIs related to {ml-features} and * <> * <> * <> +* <> * <> * <> * <> @@ -22,6 +23,7 @@ include::{es-repo-dir}/ml/apis/filterresource.asciidoc[] include::{es-repo-dir}/ml/apis/jobresource.asciidoc[] include::{es-repo-dir}/ml/apis/jobcounts.asciidoc[] include::{es-repo-dir}/ml/apis/snapshotresource.asciidoc[] +include::{es-repo-dir}/data-frames/apis/pivotresource.asciidoc[] include::{xes-repo-dir}/rest-api/security/role-mapping-resources.asciidoc[] include::{es-repo-dir}/ml/apis/resultsresource.asciidoc[] include::{es-repo-dir}/ml/apis/eventresource.asciidoc[] diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index 6697b5bb3e383..784cc94015366 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -67,3 +67,7 @@ on their mappings: `long`, `double` and other numeric fields are formatted as numbers, `keyword` fields are formatted as strings, `date` fields are formatted with the configured `date` format, etc. 
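The note added just below explains that `docvalue_fields` can only reach fields inside nested objects through `inner_hits`. A hedged sketch of that pattern follows; the `my_index` index, the `comments` nested path, and the `comments.votes` field are hypothetical:

[source,js]
--------------------------------------------------
# "my_index", "comments" and "comments.votes" are hypothetical names
GET /my_index/_search
{
  "query": {
    "nested": {
      "path": "comments",
      "query": { "match_all": {} },
      "inner_hits": {
        "docvalue_fields": [ "comments.votes" ]
      }
    }
  }
}
--------------------------------------------------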
+NOTE: On its own, `docvalue_fields` cannot be used to load fields in nested +objects -- if a field contains a nested object in its path, then no data will +be returned for that docvalue field. To access nested fields, `docvalue_fields` +must be used within an <> block. \ No newline at end of file diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index 5f3fcb2efa6b7..7412f04844c08 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -6,8 +6,12 @@ default, Elasticsearch selects from the available shard copies in an unspecified order, taking the <> and <> configuration into account. However, it may sometimes be desirable to try and route certain -searches to certain sets of shard copies, for instance to make better use of -per-copy caches. +searches to certain sets of shard copies. + +A possible use case would be to make use of per-copy caches like the +<>. Doing this, however, runs contrary to the +idea of search parallelization and can create hotspots on certain nodes because +the load might not be evenly distributed anymore. The `preference` is a query string parameter which can be set to: @@ -64,6 +68,10 @@ GET /_search?preference=xyzabc123 ------------------------------------------------ // CONSOLE +This can be an effective strategy to increase usage of e.g. the request cache for +unique users running similar searches repeatedly by always hitting the same cache, while +requests of different users are still spread across all shard copies. + NOTE: The `_only_local` preference guarantees only to use shard copies on the local node, which is sometimes useful for troubleshooting. All other options do not _fully_ guarantee that any particular shard copies are used in a search, diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc index 195dc39f11e73..b55e0fce45757 100644 --- a/docs/reference/search/request/stored-fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -49,6 +49,11 @@ Script fields can also be automatically detected and used as fields, so things like `_source.obj1.field1` can be used, though not recommended, as `obj1.field1` will work as well. +NOTE: On its own, `stored_fields` cannot be used to load fields in nested +objects -- if a field contains a nested object in its path, then no data will +be returned for that stored field. To access nested fields, `stored_fields` +must be used within an <> block. + ==== Disable stored fields entirely To disable the stored fields (and metadata fields) entirely use: `_none_`: diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index ac7160bd20aac..a2eb84bc2110e 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -37,14 +37,8 @@ required. For more information, see {xpack-ref}/encrypting-data.html[Encrypting sensitive data in {watcher}]. `xpack.watcher.history.cleaner_service.enabled`:: -ifdef::asciidoctor[] added:[6.3.0,Default changed to `true`.] deprecated:[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] -endif::[] -ifndef::asciidoctor[] -added[6.3.0,Default changed to `true`.] 
-deprecated[7.0.0,Watcher history indices are now managed by the `watch-history-ilm-policy` ILM policy] -endif::[] + Set to `true` (default) to enable the cleaner service. If this setting is `true`, the `xpack.monitoring.enabled` setting must also be set to `true` with diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index 6d8ffd90b6a37..a9c8576a8c4e9 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -38,13 +38,8 @@ endif::verifies[] Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ Java Cryptography Architecture documentation]. Defaults to ``. -ifdef::asciidoctor[] [#{ssl-context}-tls-ssl-key-trusted-certificate-settings] ===== {component} TLS/SSL Key and Trusted Certificate Settings -endif::[] -ifndef::asciidoctor[] -===== anchor:{ssl-context}-tls-ssl-key-trusted-certificate-settings[] {component} TLS/SSL Key and Trusted Certificate Settings -endif::[] The following settings are used to specify a private key, certificate, and the trusted certificates that should be used when communicating over an SSL/TLS connection. @@ -110,13 +105,8 @@ Password to the truststore. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the truststore. -ifdef::asciidoctor[] [#{ssl-context}-pkcs12-files] ===== PKCS#12 Files -endif::[] -ifndef::asciidoctor[] -===== anchor:{ssl-context}-pkcs12-files[] PKCS#12 Files -endif::[] {es} can be configured to use PKCS#12 container files (`.p12` or `.pfx` files) that contain the private key, certificate and certificates that should be trusted. @@ -154,13 +144,8 @@ Password to the PKCS#12 file. +{ssl-prefix}.ssl.truststore.secure_password+ (<>):: Password to the PKCS#12 file. -ifdef::asciidoctor[] [#{ssl-context}-pkcs11-tokens] ===== PKCS#11 Tokens -endif::[] -ifndef::asciidoctor[] -===== anchor:{ssl-context}-pkcs11-tokens[] PKCS#11 Tokens -endif::[] {es} can be configured to use a PKCS#11 token that contains the private key, certificate and certificates that should be trusted. diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 9037a292168de..e8dd1ee95957a 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -372,6 +372,12 @@ published ports with `--publish-all`, unless you are pinning one container per h . Use the `ES_JAVA_OPTS` environment variable to set heap size. For example, to use 16GB, use `-e ES_JAVA_OPTS="-Xms16g -Xmx16g"` with `docker run`. ++ +-- +NOTE: You still need to <> even if you are +https://docs.docker.com/config/containers/resource_constraints/#limit-a-containers-access-to-memory[limiting +memory access] to the container. +-- . Pin your deployments to a specific version of the {es} Docker image, for example +docker.elastic.co/elasticsearch/elasticsearch:{version}+. diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index d0b8e7d2ff3f1..0206115c182c1 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -33,17 +33,17 @@ If the condition’s result is true, the value of the result expression that fol the subsequent when clauses will be skipped and not processed. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[case] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseReturnNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseWithElse] ---- @@ -70,12 +70,12 @@ CASE WHEN expression = value1 THEN result1 END ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseWithOperand] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[caseWithOperandAndElse] ---- @@ -155,12 +155,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNull] ---- @@ -199,12 +199,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull] ---- @@ -237,12 +237,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond] ---- @@ -277,12 +277,12 @@ logic of programming languages. If the 3rd expression is not provided and the co `null` is returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[iifWithDefaultValue] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[iifWithoutDefaultValue] ---- @@ -325,12 +325,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond] ---- @@ -370,12 +370,12 @@ If all arguments are null, then it returns `null`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNonNull] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull] ---- @@ -407,12 +407,12 @@ Returns `null` when the two input expressions are equal and if not, it returns the 1st expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull] ---- @@ -446,12 +446,12 @@ If all arguments are null, then it returns `null`. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnFirst] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnSecond] ---- diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index d9d5e7bcf14e5..45231393521c7 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -57,32 +57,32 @@ s|Description Basic arithmetic operators (`+`, `-`, etc) support date/time parameters as indicated below: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalPlusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateTimePlusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateTimeMinusInterval] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalMul] -------------------------------------------------- @@ -116,17 +116,17 @@ Unlike CURRENT_DATE, `CURDATE()` can only be used as a function with no argument This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentDate] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentDateFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curDateFunction] -------------------------------------------------- @@ -134,7 +134,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[curDateFunction] Typically, this function (as well as its twin <> function is used for relative date filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday] -------------------------------------------------- @@ -165,29 +165,29 @@ meaning a milliseconds precision current time will be returned. 
This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentTime] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentTimeFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTimeFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[currentTimeFunctionPrecision] -------------------------------------------------- Typically, this function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterCurrentTime] -------------------------------------------------- @@ -221,17 +221,17 @@ meaning a milliseconds precision current date/time will be returned. This method always returns the same value for its every occurrence within the same query. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTs] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunction] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunctionPrecision] -------------------------------------------------- @@ -239,7 +239,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunctionPrecision] Typically, this function (as well as its twin <> function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] -------------------------------------------------- @@ -267,7 +267,7 @@ DAY_OF_MONTH(datetime_exp) <1> Extract the day of the month from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfMonth] -------------------------------------------------- @@ -291,7 +291,7 @@ DAY_OF_WEEK(datetime_exp) <1> Extract the day of the week from a date/datetime. Sunday is `1`, Monday is `2`, etc. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfWeek] -------------------------------------------------- @@ -315,7 +315,7 @@ DAY_OF_YEAR(datetime_exp) <1> Extract the day of the year from a date/datetime. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear] -------------------------------------------------- @@ -339,7 +339,7 @@ DAY_NAME(datetime_exp) <1> Extract the day of the week from a date/datetime in text format (`Monday`, `Tuesday`...). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayName] -------------------------------------------------- @@ -363,7 +363,7 @@ HOUR_OF_DAY(datetime_exp) <1> Extract the hour of the day from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[hourOfDay] -------------------------------------------------- @@ -388,7 +388,7 @@ ISO_DAY_OF_WEEK(datetime_exp) <1> Extract the day of the week from a date/datetime, following the https://en.wikipedia.org/wiki/ISO_week_date[ISO 8601 standard]. Monday is `1`, Tuesday is `2`, etc. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[isoDayOfWeek] -------------------------------------------------- @@ -413,7 +413,7 @@ ISO_WEEK_OF_YEAR(datetime_exp) <1> Extract the week of the year from a date/datetime, following https://en.wikipedia.org/wiki/ISO_week_date[ISO 8601 standard]. The first week of a year is the first week with a majority (4 or more) of its days in January. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[isoWeekOfYear] -------------------------------------------------- @@ -437,7 +437,7 @@ MINUTE_OF_DAY(datetime_exp) <1> Extract the minute of the day from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfDay] -------------------------------------------------- @@ -461,7 +461,7 @@ MINUTE_OF_HOUR(datetime_exp) <1> Extract the minute of the hour from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfHour] -------------------------------------------------- @@ -485,7 +485,7 @@ MONTH(datetime_exp) <1> Extract the month of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[monthOfYear] -------------------------------------------------- @@ -509,7 +509,7 @@ MONTH_NAME(datetime_exp) <1> Extract the month from a date/datetime in text format (`January`, `February`...). 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[monthName] -------------------------------------------------- @@ -533,7 +533,7 @@ This function offers the same functionality as <> function is used for relative date/time filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] -------------------------------------------------- @@ -565,7 +565,7 @@ SECOND_OF_MINUTE(datetime_exp) <1> Extract the second of the minute from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[secondOfMinute] -------------------------------------------------- @@ -589,7 +589,7 @@ QUARTER(datetime_exp) <1> Extract the year quarter the date/datetime falls in. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[quarter] -------------------------------------------------- @@ -613,7 +613,7 @@ This function offers the same functionality as <> function is used for relative date filtering: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday] -------------------------------------------------- @@ -645,7 +645,7 @@ WEEK_OF_YEAR(datetime_exp) <1> Extract the week of the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[weekOfYear] -------------------------------------------------- @@ -669,7 +669,7 @@ YEAR(datetime_exp) <1> Extract the year from a date/datetime. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[year] -------------------------------------------------- @@ -697,14 +697,14 @@ EXTRACT( Extract fields from a date/datetime by specifying the name of a <>. 
The following -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[extractDayOfYear] -------------------------------------------------- is the equivalent to -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear] -------------------------------------------------- diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 0a498a1aacef0..6f2f5a1b6e4c2 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -44,14 +44,14 @@ NOTE:: The histogram in SQL does *NOT* return empty buckets for missing interval `Histogram` can be applied on either numeric fields: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramNumeric] ---- or date/time fields: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTime] ---- @@ -59,14 +59,14 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTime] Expressions inside the histogram are also supported as long as the return type is numeric: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramNumericExpression] ---- Do note that histograms (and grouping functions in general) allow custom expressions but cannot have any functions applied to them in the `GROUP BY`. In other words, the following statement is *NOT* allowed: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[expressionOnHistogramNotAllowed] ---- @@ -75,7 +75,7 @@ as it requires two groupings (one for histogram followed by a second for applyin Instead one can rewrite the query to move the expression on the histogram _inside_ of it: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTimeExpression] ---- diff --git a/docs/reference/sql/functions/like-rlike.asciidoc b/docs/reference/sql/functions/like-rlike.asciidoc index 73212bc113542..2d5ef0b62f93f 100644 --- a/docs/reference/sql/functions/like-rlike.asciidoc +++ b/docs/reference/sql/functions/like-rlike.asciidoc @@ -38,7 +38,7 @@ with the `LIKE` operator: The percent sign represents zero, one or multiple characters. The underscore represents a single number or character. These symbols can be used in combinations. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleLike] ---- @@ -75,7 +75,7 @@ and underscore (`_`); the pattern in this case is a regular expression which all For more details about the regular expressions syntax, https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/regex/Pattern.html[Java's Pattern class javadoc] is a good starting point. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleRLike] ---- diff --git a/docs/reference/sql/functions/operators.asciidoc b/docs/reference/sql/functions/operators.asciidoc index 4b7e8990290dd..02841c84b58e5 100644 --- a/docs/reference/sql/functions/operators.asciidoc +++ b/docs/reference/sql/functions/operators.asciidoc @@ -8,7 +8,7 @@ Boolean operator for comparing against one or multiple expressions. [[sql-operators-equality]] ==== `Equality (=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] -------------------------------------------------- @@ -16,12 +16,12 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] [[sql-operators-null-safe-equality]] ==== `Null safe Equality (<=>)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareWithNull] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareTwoNulls] -------------------------------------------------- @@ -29,7 +29,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareTwoNulls] [[sql-operators-inequality]] ==== `Inequality (<> or !=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality] -------------------------------------------------- @@ -37,7 +37,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldNonEquality] [[sql-operators-comparison]] ==== `Comparison (<, <=, >, >=)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan] -------------------------------------------------- @@ -45,7 +45,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldLessThan] [[sql-operators-between]] ==== `BETWEEN` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereBetween] -------------------------------------------------- @@ -53,7 +53,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereBetween] [[sql-operators-is-null]] ==== `IS NULL/IS NOT NULL` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull] -------------------------------------------------- @@ -61,7 +61,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull] [[sql-operators-in]] ==== `IN (, , ...)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereWithInAndMultipleValues] -------------------------------------------------- @@ -74,7 +74,7 @@ Boolean operator for evaluating one or two expressions. 
[[sql-operators-and]] ==== `AND` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison] -------------------------------------------------- @@ -82,7 +82,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldAndComparison] [[sql-operators-or]] ==== `OR` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison] -------------------------------------------------- @@ -90,7 +90,7 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldOrComparison] [[sql-operators-not]] ==== `NOT` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/filter.sql-spec[whereFieldEqualityNot] -------------------------------------------------- @@ -104,7 +104,7 @@ The result is a value of numeric type. [[sql-operators-plus]] ==== `Add (+)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[plus] -------------------------------------------------- @@ -112,7 +112,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[plus] [[sql-operators-subtract]] ==== `Subtract (infix -)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[minus] -------------------------------------------------- @@ -120,7 +120,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[minus] [[sql-operators-negate]] ==== `Negate (unary -)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus] -------------------------------------------------- @@ -128,7 +128,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[unaryMinus] [[sql-operators-multiply]] ==== `Multiply (*)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[multiply] -------------------------------------------------- @@ -136,7 +136,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[multiply] [[sql-operators-divide]] ==== `Divide (/)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[divide] -------------------------------------------------- @@ -144,7 +144,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[divide] [[sql-operators-remainder]] ==== `Modulo or Remainder(%)` -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/arithmetic.sql-spec[mod] -------------------------------------------------- @@ -157,7 +157,7 @@ include-tagged::{sql-specs}/arithmetic.sql-spec[mod] `::` provides an alternative syntax to the <> function. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToLongCastOperator] -------------------------------------------------- diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc index 6990f6669d69c..34716e070434f 100644 --- a/docs/reference/sql/functions/search.asciidoc +++ b/docs/reference/sql/functions/search.asciidoc @@ -33,7 +33,7 @@ and <> {es} queries. The first parameter is the field or fields to match against. In case it receives one value only, {es-sql} will use a `match` query to perform the search: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleMatch] ---- @@ -41,7 +41,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[simpleMatch] However, it can also receive a list of fields and their corresponding optional `boost` value. In this case, {es-sql} will use a `multi_match` query to match the documents: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[multiFieldsMatch] ---- @@ -53,22 +53,17 @@ the final score than the `author` field when searching for `frank dune` text in Both options above can be used in combination with the optional third parameter of the `MATCH()` predicate, where one can specify additional configuration parameters (separated by semicolon `;`) for either `match` or `multi_match` queries. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParamsForMatch] ---- -In the more advanced example above, the `cutoff_frequency` parameter allows specifying an absolute or relative document frequency where -high frequency terms are moved into an optional subquery and are only scored if one of the low frequency (below the cutoff) terms in the -case of an `or` operator or all of the low frequency terms in the case of an `and` operator match. More about this you can find in the -<> page. - NOTE: The allowed optional parameters for a single-field `MATCH()` variant (for the `match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, -`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, `max_expansions`, `prefix_length`. NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for the `multi_match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, -`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, `max_expansions`, `prefix_length`, `slop`, `tie_breaker`, `type`. 
@@ -95,14 +90,14 @@ Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user c The first parameter is basically the input that will be passed as is to the `query_string` query, which means that anything that `query_string` accepts in its `query` field can be used here as well: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[simpleQueryQuery] ---- A more advanced example, showing more of the features that `query_string` supports, of course possible with {es-sql}: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[advancedQueryQuery] ---- @@ -113,7 +108,7 @@ regex and fuzziness queries for the `name` field. If one needs to customize various configuration options that `query_string` exposes, this can be done using the second _optional_ parameter. Multiple settings can be specified separated by a semicolon `;`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParameterQuery] ---- @@ -149,14 +144,14 @@ combined using the same rules as {es}'s Typically `SCORE` is used for ordering the results of a query based on their relevance: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] ---- However, it is perfectly fine to return the score without sorting by it: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[scoreWithMatch] ---- diff --git a/docs/reference/sql/functions/string.asciidoc b/docs/reference/sql/functions/string.asciidoc index 7acc358763512..a82ac66adce1c 100644 --- a/docs/reference/sql/functions/string.asciidoc +++ b/docs/reference/sql/functions/string.asciidoc @@ -24,7 +24,7 @@ ASCII(string_exp) <1> Returns the ASCII code value of the leftmost character of `string_exp` as an integer. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringAscii] -------------------------------------------------- @@ -47,7 +47,7 @@ BIT_LENGTH(string_exp) <1> Returns the length in bits of the `string_exp` input expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringBitLength] -------------------------------------------------- @@ -70,7 +70,7 @@ CHAR(code) <1> Returns the character that has the ASCII code value specified by the numeric input. The value should be between 0 and 255; otherwise, the return value is data source–dependent. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringChar] -------------------------------------------------- @@ -93,7 +93,7 @@ CHAR_LENGTH(string_exp) <1> Returns the length in characters of the input, if the string expression is of a character data type; otherwise, returns the length in bytes of the string expression (the smallest integer not less than the number of bits divided by 8). 
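A hypothetical call, assuming a plain string literal rather than one from the spec files:

[source, sql]
----
-- 'Elastic' is 7 characters long
SELECT CHAR_LENGTH('Elastic') AS chars;
----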
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringCharLength] -------------------------------------------------- @@ -119,7 +119,7 @@ CONCAT( Returns a character string that is the result of concatenating `string_exp1` to `string_exp2`. If one of the string is `NULL`, the other string will be returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringConcat] -------------------------------------------------- @@ -149,7 +149,7 @@ INSERT( Returns a string where `length` characters have been deleted from `source`, beginning at `start`, and where `replacement` has been inserted into `source`, beginning at `start`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringInsert] -------------------------------------------------- @@ -172,7 +172,7 @@ LCASE(string_exp) <1> Returns a string equal to that in `string_exp`, with all uppercase characters converted to lowercase. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLCase] -------------------------------------------------- @@ -198,7 +198,7 @@ LEFT( Returns the leftmost count characters of `string_exp`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLeft] -------------------------------------------------- @@ -221,7 +221,7 @@ LENGTH(string_exp) <1> Returns the number of characters in `string_exp`, excluding trailing blanks. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLength] -------------------------------------------------- @@ -250,12 +250,12 @@ LOCATE( Returns the starting position of the first occurrence of `pattern` within `source`. The search for the first occurrence of `pattern` begins with the first character position in `source` unless the optional argument, `start`, is specified. If `start` is specified, the search begins with the character position indicated by the value of `start`. The first character position in `source` is indicated by the value 1. If `pattern` is not found within `source`, the value 0 is returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWoStart] -------------------------------------------------- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWithStart] -------------------------------------------------- @@ -278,7 +278,7 @@ LTRIM(string_exp) <1> Returns the characters of `string_exp`, with leading blanks removed. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringLTrim] -------------------------------------------------- @@ -301,7 +301,7 @@ OCTET_LENGTH(string_exp) <1> Returns the length in bytes of the `string_exp` input expression. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringOctetLength] -------------------------------------------------- @@ -327,7 +327,7 @@ POSITION( Returns the position of the `string_exp1` in `string_exp2`. The result is an exact numeric. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringPosition] -------------------------------------------------- @@ -353,7 +353,7 @@ REPEAT( Returns a character string composed of `string_exp` repeated `count` times. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringRepeat] -------------------------------------------------- @@ -381,7 +381,7 @@ REPLACE( Search `source` for occurrences of `pattern`, and replace with `replacement`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringReplace] -------------------------------------------------- @@ -407,7 +407,7 @@ RIGHT( Returns the rightmost count characters of `string_exp`. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringRight] -------------------------------------------------- @@ -430,7 +430,7 @@ RTRIM(string_exp) <1> Returns the characters of `string_exp` with trailing blanks removed. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringRTrim] -------------------------------------------------- @@ -453,7 +453,7 @@ SPACE(count) <1> Returns a character string consisting of `count` spaces. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringSpace] -------------------------------------------------- @@ -481,7 +481,7 @@ SUBSTRING( Returns a character string that is derived from `source`, beginning at the character position specified by `start` for `length` characters. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringSubString] -------------------------------------------------- @@ -504,7 +504,7 @@ UCASE(string_exp) <1> Returns a string equal to that of the input, with all lowercase characters converted to uppercase. 
-["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[stringUCase] -------------------------------------------------- diff --git a/docs/reference/sql/functions/system.asciidoc b/docs/reference/sql/functions/system.asciidoc index dfca7d526d3a6..b2d604728c165 100644 --- a/docs/reference/sql/functions/system.asciidoc +++ b/docs/reference/sql/functions/system.asciidoc @@ -24,7 +24,7 @@ Returns the name of the database being queried. In the case of Elasticsearch SQL is the name of the Elasticsearch cluster. This function should always return a non-null value. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[database] -------------------------------------------------- @@ -46,7 +46,7 @@ USER() Returns the username of the authenticated user executing the query. This function can return `null` in case {stack-ov}/elasticsearch-security.html[Security] is disabled. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] -------------------------------------------------- include-tagged::{sql-specs}/docs/docs.csv-spec[user] -------------------------------------------------- diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc index 7f8488be40f64..c6c761305519e 100644 --- a/docs/reference/sql/functions/type-conversion.asciidoc +++ b/docs/reference/sql/functions/type-conversion.asciidoc @@ -25,17 +25,17 @@ Casts the result of the given expression to the target <> with slightly differen Moreover, apart from the standard <> it supports the corresponding https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/explicit-data-type-conversion-function?view=sql-server-2017[ODBC data types]. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntConvertODBCDataType] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntConvertESDataType] ---- diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 82c7f30fb041e..8f48177ce03a9 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -14,7 +14,7 @@ is supported _as long_ as it is quoted or escaped as a table identifier. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] ---- @@ -28,7 +28,7 @@ The same kind of patterns can also be used to query multiple indices or tables. For example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] ---- @@ -44,7 +44,7 @@ or multiple `%` characters. Using `SHOW TABLES` command again: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- @@ -53,7 +53,7 @@ The pattern matches all tables that start with `emp`. 
This command supports _escaping_ as well, for example: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeEscape] ---- @@ -101,13 +101,13 @@ Set to `true` properties `index_include_frozen` in the <> or `index.in dedicated keyword:: Explicitly perform the inclusion through the dedicated `FROZEN` keyword in the `FROM` clause or `INCLUDE FROZEN` in the `SHOW` commands: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesIncludeFrozen] ---- -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableIncludeFrozen] ---- diff --git a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc index da02f1fa23817..9aad578da479e 100644 --- a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc +++ b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc @@ -30,7 +30,7 @@ DESC `DESC` and `DESCRIBE` are aliases to <>. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[describeTable] ---- diff --git a/docs/reference/sql/language/syntax/commands/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc index 08ebe0ae96497..1ae8e219acf25 100644 --- a/docs/reference/sql/language/syntax/commands/select.asciidoc +++ b/docs/reference/sql/language/syntax/commands/select.asciidoc @@ -36,7 +36,7 @@ The general execution of `SELECT` is as follows: As with a table, every output column of a `SELECT` has a name which can be either specified per column through the `AS` keyword : -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumnAlias] ---- @@ -46,14 +46,14 @@ which is why it is recommended to specify it. assigned by {es-sql} if no name is given: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[selectInline] ---- or if it's a simple column reference, use its name as the column name: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumn] ---- @@ -63,7 +63,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumn] To select all the columns in the source, one can use `*`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[wildcardWithOrder] ---- @@ -89,14 +89,14 @@ Represents the name (optionally qualified) of an existing table, either a concre If the table name contains special SQL characters (such as `.`,`-`,`*`,etc...) use double quotes to escape them: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableQuoted] ---- The name can be a <> pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that *all* resolved concrete tables have **exact mapping**. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] ---- @@ -104,7 +104,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] `alias`:: A substitute name for the `FROM` item containing the alias. 
An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableAlias] ---- @@ -125,7 +125,7 @@ where: Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[basicWhere] ---- @@ -148,34 +148,34 @@ Represents an expression on which rows are being grouped _on_. It can be a colum A common, group by column name: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByColumn] ---- Grouping by output ordinal: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByOrdinal] ---- Grouping by alias: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAlias] ---- And grouping by column expression (typically used along-side an alias): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByExpression] ---- Or a mixture of the above: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByMulti] ---- @@ -185,27 +185,27 @@ When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be To wit: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndAgg] ---- Expressions over aggregates used in output: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndAggExpression] ---- Multiple aggregates used: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndMultipleAggs] ---- [TIP] -If custom bucketing is required, it can be achieved with the use of `<>`, +If custom bucketing is required, it can be achieved with the use of <>, as shown <>. [[sql-syntax-group-by-implicit]] @@ -216,14 +216,14 @@ As such, the query emits only a single row (as there is only a single group). A common example is counting the number of records: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitCount] ---- Of course, multiple aggregations can be applied: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitMultipleAggs] ---- @@ -249,14 +249,14 @@ Both `WHERE` and `HAVING` are used for filtering however there are several signi . `WHERE` works on individual *rows*, `HAVING` works on the *groups* created by ``GROUP BY`` . 
`WHERE` is evaluated *before* grouping, `HAVING` is evaluated *after* grouping -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHaving] ---- Further more, one can use multiple aggregate expressions inside `HAVING` even ones that are not used in the output (`SELECT`): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingMultiple] ---- @@ -269,14 +269,14 @@ As such, the query emits only a single row (as there is only a single group) and In this example, `HAVING` matches: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitMatch] ---- //However `HAVING` can also not match, in which case an empty result is returned: // -//["source","sql",subs="attributes,callouts,macros"] +//[source, sql] //---- //include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitNoMatch] //---- @@ -304,7 +304,7 @@ IMPORTANT: When used along-side, `GROUP BY` expression can point _only_ to the c For example, the following query sorts by an arbitrary input field (`page_count`): -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByBasic] ---- @@ -318,26 +318,26 @@ NOTE: With `GROUP BY`, make sure the ordering targets the resulting group - appl For example, to order groups simply indicate the grouping key: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByGroup] ---- Multiple keys can be specified of course: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[groupByMulti] ---- Further more, it is possible to order groups based on aggregations of their values: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByAgg] ---- IMPORTANT: Ordering by aggregation is possible for up to 512 entries for memory consumption reasons. -In cases where the results pass this threshold, use <<`LIMIT`, sql-syntax-limit>> to reduce the number +In cases where the results pass this threshold, use <> to reduce the number of results. [[sql-syntax-order-by-score]] @@ -352,7 +352,7 @@ combined using the same rules as {es}'s To sort based on the `score`, use the special function `SCORE()`: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] ---- @@ -360,7 +360,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] Note that you can return `SCORE()` by using a full-text search predicate in the `WHERE` clause. This is possible even if `SCORE()` is not used for sorting: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScoreWithMatch] ---- @@ -387,7 +387,7 @@ ALL:: indicates there is no limit and thus all results are being returned. 
To return -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[limitBasic] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc index b21c02358e526..9cb90af6b656f 100644 --- a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc @@ -21,7 +21,7 @@ patterns. List the columns in table and their data type (and other attributes). -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showColumns] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc index 47c000e81d9fd..8689788867c20 100644 --- a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc @@ -15,7 +15,7 @@ SHOW FUNCTIONS [LIKE pattern?]? <1> List all the SQL functions and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctions] ---- @@ -23,25 +23,25 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctions] The list of functions returned can be customized based on the pattern. It can be an exact match: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeExact] ---- A wildcard for exactly one character: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeChar] ---- A wildcard matching zero or more characters: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeWildcard] ---- Or of course, a variation of the above: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsWithPattern] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc index 554819e24b178..d5a40337713d4 100644 --- a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc @@ -24,7 +24,7 @@ patterns. List the tables available to the current user and their type. -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTables] ---- @@ -32,7 +32,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showTables] Match multiple indices by using {es} <> notation: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] ---- @@ -40,26 +40,26 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] One can also use the `LIKE` clause to restrict the list of names to the given pattern. 
The pattern can be an exact match: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeExact] ---- Multiple chars: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- A single char: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeOneChar] ---- Or a mixture of single and multiple chars: -["source","sql",subs="attributes,callouts,macros"] +[source, sql] ---- include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeMixed] ---- diff --git a/docs/reference/sql/security.asciidoc b/docs/reference/sql/security.asciidoc index ad946c33e2d98..cbf41b46997a4 100644 --- a/docs/reference/sql/security.asciidoc +++ b/docs/reference/sql/security.asciidoc @@ -33,7 +33,7 @@ the API require `cluster:monitor/main`. The following example configures a role that can run SQL in JDBC querying the `test` and `bort` indices: -["source","yaml",subs="attributes,callouts,macros"] +[source, yaml] -------------------------------------------------- include-tagged::{sql-tests}/security/roles.yml[cli_drivers] -------------------------------------------------- diff --git a/libs/build.gradle b/libs/build.gradle index b0924aa1f54f1..03b5d2c611e71 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -26,19 +26,19 @@ subprojects { /* * Subprojects may depend on the "core" lib but may not depend on any - * other libs. This keeps are dependencies simpler. + * other libs. This keeps our dependencies simpler. */ project.afterEvaluate { configurations.all { Configuration conf -> - dependencies.all { Dependency dep -> - Project depProject = dependencyToProject(dep) + dependencies.matching { it instanceof ProjectDependency }.all { ProjectDependency dep -> + Project depProject = dep.dependencyProject if (depProject != null - && false == depProject.path.equals(':libs:core') + && false == depProject.path.equals(':libs:elasticsearch-core') && false == isEclipse && depProject.path.startsWith(':libs')) { throw new InvalidUserDataException("projects in :libs " + "may not depend on other projects libs except " - + ":libs:core but " + + ":libs:elasticsearch-core but " + "${project.path} depends on ${depProject.path}") } } diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index b1f3b338255c4..a97c62096a512 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -23,7 +23,7 @@ apply plugin: 'nebula.maven-scm' dependencies { compile 'net.sf.jopt-simple:jopt-simple:5.0.2' - compile "org.elasticsearch:elasticsearch-core:${version}" + compile project(':libs:elasticsearch-core') } test.enabled = false diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 86778c3d23744..b67b213d7b41c 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -21,30 +21,20 @@ apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -archivesBaseName = 'elasticsearch-core' - -publishing { - publications { - nebula { - artifactId = archivesBaseName - } - } -} - dependencies { testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" - if (isEclipse == false || project.path == ":libs:core-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { + if 
(isEclipse == false || project.path == ":libs:elasticsearch-core-tests") { + testCompile(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'elasticsearch-core' } } } forbiddenApisMain { - // :libs:core does not depend on server + // :libs:elasticsearch-core does not depend on server // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server replaceSignatureFiles 'jdk-signatures' } @@ -52,7 +42,7 @@ forbiddenApisMain { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:core") { + if (project.path == ":libs:elasticsearch-core") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/dissect/build.gradle b/libs/dissect/build.gradle index 853c78646c25b..7e71f86f64f44 100644 --- a/libs/dissect/build.gradle +++ b/libs/dissect/build.gradle @@ -17,17 +17,15 @@ * under the License. */ -archivesBaseName = 'elasticsearch-dissect' - dependencies { - if (isEclipse == false || project.path == ":libs:dissect-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { - exclude group: 'org.elasticsearch', module: 'dissect' + if (isEclipse == false || project.path == ":libs:elasticsearch-dissect-tests") { + testCompile(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-dissect' } } testCompile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testCompile("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") - testCompile("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") + testCompile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + testCompile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" } forbiddenApisMain { @@ -37,7 +35,7 @@ forbiddenApisMain { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:dissect") { + if (project.path == ":libs:elasticsearch-dissect") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/geo/build.gradle b/libs/geo/build.gradle index ab3419b93b9b8..e2e5a11d53586 100644 --- a/libs/geo/build.gradle +++ b/libs/geo/build.gradle @@ -22,8 +22,8 @@ apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' dependencies { - if (isEclipse == false || project.path == ":libs:geo-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { + if (isEclipse == false || project.path == ":libs:elasticsearch-geo-tests") { + testCompile(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'elasticsearch-geo' } } @@ -38,7 +38,7 @@ forbiddenApisMain { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:geo") { + if (project.path == ":libs:elasticsearch-geo") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index 9ca02df35aabe..ca3634805195d 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -17,16 +17,14 @@ * under the License. 
*/ -archivesBaseName = 'elasticsearch-grok' - dependencies { compile 'org.jruby.joni:joni:2.1.6' // joni dependencies: compile 'org.jruby.jcodings:jcodings:1.0.12' - if (isEclipse == false || project.path == ":libs:grok-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { - exclude group: 'org.elasticsearch', module: 'grok' + if (isEclipse == false || project.path == ":libs:elasticsearch-grok-tests") { + testCompile(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-grok' } } } @@ -38,7 +36,7 @@ forbiddenApisMain { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:grok") { + if (project.path == ":libs:elasticsearch-grok") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/nio/build.gradle b/libs/nio/build.gradle index 66436bb040e18..d6d0eaea0ab83 100644 --- a/libs/nio/build.gradle +++ b/libs/nio/build.gradle @@ -19,25 +19,15 @@ apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -archivesBaseName = 'elasticsearch-nio' - -publishing { - publications { - nebula { - artifactId = archivesBaseName - } - } -} - dependencies { - compile "org.elasticsearch:elasticsearch-core:${version}" + compile project(':libs:elasticsearch-core') testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" - if (isEclipse == false || project.path == ":libs:nio-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { + if (isEclipse == false || project.path == ":libs:elasticsearch-nio-tests") { + testCompile(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'elasticsearch-nio' } } @@ -46,7 +36,7 @@ dependencies { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:nio") { + if (project.path == ":libs:elasticsearch-nio") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index bbd44afc70a6d..3e79d9ee2e84b 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -19,16 +19,6 @@ apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -archivesBaseName = 'elasticsearch-secure-sm' - -publishing { - publications { - nebula { - artifactId = archivesBaseName - } - } -} - dependencies { // do not add non-test compile dependencies to secure-sm without a good reason to do so @@ -36,9 +26,9 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" - if (isEclipse == false || project.path == ":libs:secure-sm-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { - exclude group: 'org.elasticsearch', module: 'secure-sm' + if (isEclipse == false || project.path == ":libs:elasticsearch-secure-sm-tests") { + testCompile(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-secure-sm' } } } @@ -50,7 +40,7 @@ forbiddenApisMain { if (isEclipse) { // in Eclipse the project is under a fake root so we need to change around the source sets sourceSets { - if (project.path == ":libs:secure-sm") { + if (project.path == ":libs:elasticsearch-secure-sm") { main.java.srcDirs = ['java'] 
main.resources.srcDirs = ['resources'] } else { diff --git a/libs/ssl-config/build.gradle b/libs/ssl-config/build.gradle index 860cdcd9e6fc8..71ebd642a4339 100644 --- a/libs/ssl-config/build.gradle +++ b/libs/ssl-config/build.gradle @@ -19,10 +19,10 @@ apply plugin: "nebula.maven-scm" dependencies { - compile "org.elasticsearch:elasticsearch-core:${version}" + compile project(':libs:elasticsearch-core') - if (isEclipse == false || project.path == ":libs:ssl-config-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { + if (isEclipse == false || project.path == ":libs:elasticsearch-ssl-config-tests") { + testCompile(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'elasticsearch-ssl-config' } } @@ -35,7 +35,7 @@ dependencies { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:ssl-config") { + if (project.path == ":libs:elasticsearch-ssl-config") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index 0e99d80da1e00..e54427c058304 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -21,18 +21,8 @@ apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' -archivesBaseName = 'elasticsearch-x-content' - -publishing { - publications { - nebula { - artifactId = archivesBaseName - } - } -} - dependencies { - compile "org.elasticsearch:elasticsearch-core:${version}" + compile project(':libs:elasticsearch-core') compile "org.yaml:snakeyaml:${versions.snakeyaml}" compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" @@ -44,8 +34,8 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest:${versions.hamcrest}" - if (isEclipse == false || project.path == ":libs:x-content-tests") { - testCompile("org.elasticsearch.test:framework:${version}") { + if (isEclipse == false || project.path == ":libs:elasticsearch-x-content-tests") { + testCompile(project(":test:framework")) { exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' } } @@ -61,7 +51,7 @@ forbiddenApisMain { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:x-content") { + if (project.path == ":libs:elasticsearch-x-content") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index ee5e3347f8d99..c80c5bdb0d09a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -78,14 +78,63 @@ public static BiConsumer> fromLi }; } + private interface UnknownFieldParser { + + void acceptUnknownField(String parserName, String field, XContentLocation location, XContentParser parser, + Value value, Context context) throws IOException; + } + + private static UnknownFieldParser ignoreUnknown() { + return (n, f, l, p, v, c) -> p.skipChildren(); + } + + private static UnknownFieldParser errorOnUnknown() { + return (n, f, l, p, v, c) -> { + throw new XContentParseException(l, "[" + n + "] unknown field [" + f + "], parser not found"); + }; + } + + /** + * 
Defines how to consume a parsed undefined field + */ + public interface UnknownFieldConsumer { + void accept(Value target, String field, Object value); + } + + private static UnknownFieldParser consumeUnknownField(UnknownFieldConsumer consumer) { + return (parserName, field, location, parser, value, context) -> { + XContentParser.Token t = parser.currentToken(); + switch (t) { + case VALUE_STRING: + consumer.accept(value, field, parser.text()); + break; + case VALUE_NUMBER: + consumer.accept(value, field, parser.numberValue()); + break; + case VALUE_BOOLEAN: + consumer.accept(value, field, parser.booleanValue()); + break; + case VALUE_NULL: + consumer.accept(value, field, null); + break; + case START_OBJECT: + consumer.accept(value, field, parser.map()); + break; + case START_ARRAY: + consumer.accept(value, field, parser.list()); + break; + default: + throw new XContentParseException(parser.getTokenLocation(), + "[" + parserName + "] cannot parse field [" + field + "] with value type [" + t + "]"); + } + }; + } + private final Map fieldParserMap = new HashMap<>(); private final String name; private final Supplier valueSupplier; - /** - * Should this parser ignore unknown fields? This should generally be set to true only when parsing responses from external systems, - * never when parsing requests from users. - */ - private final boolean ignoreUnknownFields; + + private final UnknownFieldParser unknownFieldParser; /** * Creates a new ObjectParser instance with a name. This name is used to reference the parser in exceptions and messages. @@ -95,25 +144,45 @@ public ObjectParser(String name) { } /** - * Creates a new ObjectParser instance which a name. + * Creates a new ObjectParser instance with a name. * @param name the parsers name, used to reference the parser in exceptions and messages. * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. */ public ObjectParser(String name, @Nullable Supplier valueSupplier) { - this(name, false, valueSupplier); + this(name, errorOnUnknown(), valueSupplier); } /** - * Creates a new ObjectParser instance which a name. + * Creates a new ObjectParser instance with a name. * @param name the parsers name, used to reference the parser in exceptions and messages. * @param ignoreUnknownFields Should this parser ignore unknown fields? This should generally be set to true only when parsing * responses from external systems, never when parsing requests from users. * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. */ public ObjectParser(String name, boolean ignoreUnknownFields, @Nullable Supplier valueSupplier) { + this(name, ignoreUnknownFields ? ignoreUnknown() : errorOnUnknown(), valueSupplier); + } + + /** + * Creates a new ObjectParser instance with a name. + * @param name the parsers name, used to reference the parser in exceptions and messages. + * @param unknownFieldConsumer how to consume parsed unknown fields + * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. + */ + public ObjectParser(String name, UnknownFieldConsumer unknownFieldConsumer, @Nullable Supplier valueSupplier) { + this(name, consumeUnknownField(unknownFieldConsumer), valueSupplier); + } + + /** + * Creates a new ObjectParser instance with a name. + * @param name the parsers name, used to reference the parser in exceptions and messages. 
+ * @param unknownFieldParser how to parse unknown fields + * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. + */ + private ObjectParser(String name, UnknownFieldParser unknownFieldParser, @Nullable Supplier valueSupplier) { this.name = name; this.valueSupplier = valueSupplier; - this.ignoreUnknownFields = ignoreUnknownFields; + this.unknownFieldParser = unknownFieldParser; } /** @@ -152,17 +221,18 @@ public Value parse(XContentParser parser, Value value, Context context) throws I FieldParser fieldParser = null; String currentFieldName = null; + XContentLocation currentPosition = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - fieldParser = getParser(currentFieldName, parser); + currentPosition = parser.getTokenLocation(); + fieldParser = fieldParserMap.get(currentFieldName); } else { if (currentFieldName == null) { throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found"); } if (fieldParser == null) { - assert ignoreUnknownFields : "this should only be possible if configured to ignore known fields"; - parser.skipChildren(); // noop if parser points to a value, skips children if parser is start object or start array + unknownFieldParser.acceptUnknownField(name, currentFieldName, currentPosition, parser, value, context); } else { fieldParser.assertSupports(name, parser, currentFieldName); parseSub(parser, fieldParser, currentFieldName, value, context); @@ -363,15 +433,6 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur } } - private FieldParser getParser(String fieldName, XContentParser xContentParser) { - FieldParser parser = fieldParserMap.get(fieldName); - if (parser == null && false == ignoreUnknownFields) { - throw new XContentParseException(xContentParser.getTokenLocation(), - "[" + name + "] unknown field [" + fieldName + "], parser not found"); - } - return parser; - } - private class FieldParser { private final Parser parser; private final EnumSet supportedTokens; diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index e089b8a956ac8..6002c6bd35076 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -33,7 +33,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.containsString; @@ -733,4 +735,41 @@ public void setFoo(int foo) { this.foo = foo; } } + + private static class ObjectWithArbitraryFields { + String name; + Map fields = new HashMap<>(); + void setField(String key, Object value) { + fields.put(key, value); + } + void setName(String name) { + this.name = name; + } + } + + public void testConsumeUnknownFields() throws IOException { + XContentParser parser = createParser(JsonXContent.jsonXContent, + "{\n" + + " \"test\" : \"foo\",\n" + + " \"test_number\" : 2,\n" + + " \"name\" : \"geoff\",\n" + + " \"test_boolean\" : true,\n" + + " \"test_null\" : null,\n" + + " \"test_array\": [1,2,3,4],\n" + + " \"test_nested\": { \"field\" : \"value\", \"field2\" 
: [ \"list1\", \"list2\" ] }\n" + + "}"); + ObjectParser op + = new ObjectParser<>("unknown", ObjectWithArbitraryFields::setField, ObjectWithArbitraryFields::new); + op.declareString(ObjectWithArbitraryFields::setName, new ParseField("name")); + + ObjectWithArbitraryFields o = op.parse(parser, null); + assertEquals("geoff", o.name); + assertEquals(6, o.fields.size()); + assertEquals("foo", o.fields.get("test")); + assertEquals(2, o.fields.get("test_number")); + assertEquals(true, o.fields.get("test_boolean")); + assertNull(o.fields.get("test_null")); + assertEquals(List.of(1, 2, 3, 4), o.fields.get("test_array")); + assertEquals(Map.of("field", "value", "field2", List.of("list1", "list2")), o.fields.get("test_nested")); + } } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml index ce9cc74955729..dca56565e6954 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/50_queries_with_synonyms.yml @@ -1,8 +1,5 @@ --- "Test common terms query with stacked tokens": - - skip: - features: "warnings" - - do: indices.create: index: test @@ -50,135 +47,6 @@ refresh: true - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast brown - cutoff_frequency: 3 - low_freq_operator: or - - match: { hits.total: 3 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.2._id: "3" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast brown - cutoff_frequency: 3 - low_freq_operator: and - - match: { hits.total: 2 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast brown - cutoff_frequency: 3 - - match: { hits.total: 3 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.2._id: "3" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast huge fox - minimum_should_match: - low_freq: 3 - - match: { hits.total: 1 } - - match: { hits.hits.0._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast lazy fox brown - cutoff_frequency: 1 - minimum_should_match: - high_freq: 5 - - match: { hits.total: 2 } - - match: { hits.hits.0._id: "2" } - - match: { hits.hits.1._id: "1" } 
- - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast lazy fox brown - cutoff_frequency: 1 - minimum_should_match: - high_freq: 6 - - match: { hits.total: 1 } - - match: { hits.hits.0._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the fast lazy fox brown - cutoff_frequency: 1 - - match: { hits.total: 1 } - - match: { hits.hits.0._id: "2" } - - - do: - warnings: - - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' - search: - rest_total_hits_as_int: true - body: - query: - common: - field1: - query: the quick brown - cutoff_frequency: 3 - - match: { hits.total: 3 } - - match: { hits.hits.0._id: "1" } - - match: { hits.hits.1._id: "2" } - - match: { hits.hits.2._id: "3" } - - - do: - warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -186,15 +54,12 @@ match: field1: query: the fast brown - cutoff_frequency: 3 operator: and - match: { hits.total: 2 } - match: { hits.hits.0._id: "1" } - match: { hits.hits.1._id: "2" } - do: - warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -202,7 +67,6 @@ match: field1: query: the fast brown - cutoff_frequency: 3 operator: or - match: { hits.total: 3 } - match: { hits.hits.0._id: "1" } @@ -210,8 +74,6 @@ - match: { hits.hits.2._id: "3" } - do: - warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -219,7 +81,6 @@ match: field1: query: the fast brown - cutoff_frequency: 3 minimum_should_match: 3 - match: { hits.total: 2 } - match: { hits.hits.0._id: "1" } @@ -227,7 +88,6 @@ - do: warnings: - - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked]' search: rest_total_hits_as_int: true body: @@ -235,7 +95,6 @@ multi_match: query: the fast brown fields: [ "field1", "field2" ] - cutoff_frequency: 3 operator: and - match: { hits.total: 3 } - match: { hits.hits.0._id: "3" } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index a94c375afc3e1..00c444e50e87d 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -25,6 +25,6 @@ esplugin { dependencies { compileOnly project(':modules:lang-painless') - compile project(':libs:grok') - compile project(':libs:dissect') + compile project(':libs:elasticsearch-grok') + compile project(':libs:elasticsearch-dissect') } \ No newline at end of file diff --git 
a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml index 86f4821ddaa23..3d100fad3b027 100644 --- a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/200_default_pipeline.yml @@ -119,6 +119,12 @@ teardown: {"doc":{"bytes_source_field":"2kb"}, "doc_as_upsert":true} {"update":{"_id":"8","_index":"test"}} {"script": "ctx._source.ran_script = true","upsert":{"bytes_source_field":"3kb"}, "scripted_upsert" : true} + {"update":{"_id":"6_alias","_index":"test_alias"}} + {"script":"ctx._source.ran_script = true","upsert":{"bytes_source_field":"1kb"}} + {"update":{"_id":"7_alias","_index":"test_alias"}} + {"doc":{"bytes_source_field":"2kb"}, "doc_as_upsert":true} + {"update":{"_id":"8_alias","_index":"test_alias"}} + {"script": "ctx._source.ran_script = true","upsert":{"bytes_source_field":"3kb"}, "scripted_upsert" : true} - do: mget: @@ -127,6 +133,9 @@ teardown: - { _index: "test", _id: "6" } - { _index: "test", _id: "7" } - { _index: "test", _id: "8" } + - { _index: "test", _id: "6_alias" } + - { _index: "test", _id: "7_alias" } + - { _index: "test", _id: "8_alias" } - match: { docs.0._index: "test" } - match: { docs.0._id: "6" } - match: { docs.0._source.bytes_source_field: "1kb" } @@ -141,6 +150,20 @@ teardown: - match: { docs.2._source.bytes_source_field: "3kb" } - match: { docs.2._source.bytes_target_field: 3072 } - match: { docs.2._source.ran_script: true } + - match: { docs.3._index: "test" } + - match: { docs.3._id: "6_alias" } + - match: { docs.3._source.bytes_source_field: "1kb" } + - match: { docs.3._source.bytes_target_field: 1024 } + - is_false: docs.3._source.ran_script + - match: { docs.4._index: "test" } + - match: { docs.4._id: "7_alias" } + - match: { docs.4._source.bytes_source_field: "2kb" } + - match: { docs.4._source.bytes_target_field: 2048 } + - match: { docs.5._index: "test" } + - match: { docs.5._id: "8_alias" } + - match: { docs.5._source.bytes_source_field: "3kb" } + - match: { docs.5._source.bytes_target_field: 3072 } + - match: { docs.5._source.ran_script: true } # explicit no default pipeline - do: diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 1f6b722ec308c..6cce3850232e0 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -69,7 +69,7 @@ sourceSets { } dependencies { - docCompile "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" + docCompile project(':server') docCompile project(':modules:lang-painless') } diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index 7e43a242a23a9..3f25f247a2b67 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -33,7 +33,7 @@ publishing { } dependencies { - compile "org.elasticsearch:elasticsearch:${version}" + compile project(":server") } // no tests...yet? diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java new file mode 100644 index 0000000000000..58357cce3ac96 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DateTests.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import java.time.ZoneId; +import java.time.ZonedDateTime; + +public class DateTests extends ScriptTestCase { + + public void testLongToZonedDateTime() { + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "long milliSinceEpoch = 434931330000L;" + + "Instant instant = Instant.ofEpochMilli(milliSinceEpoch);" + + "return ZonedDateTime.ofInstant(instant, ZoneId.of('Z'));" + )); + } + + public void testStringToZonedDateTime() { + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "String milliSinceEpochString = '434931330000';" + + "long milliSinceEpoch = Long.parseLong(milliSinceEpochString);" + + "Instant instant = Instant.ofEpochMilli(milliSinceEpoch);" + + "return ZonedDateTime.ofInstant(instant, ZoneId.of('Z'));" + )); + + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "String datetime = '1983-10-13T22:15:30Z';" + + "return ZonedDateTime.parse(datetime);" + )); + + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "String datetime = 'Thu, 13 Oct 1983 22:15:30 GMT';" + + "return ZonedDateTime.parse(datetime, DateTimeFormatter.RFC_1123_DATE_TIME);" + )); + + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "String datetime = 'custom y 1983 m 10 d 13 22:15:30 Z';" + + "DateTimeFormatter dtf = DateTimeFormatter.ofPattern(" + + "\"'custom' 'y' yyyy 'm' MM 'd' dd HH:mm:ss VV\");" + + "return ZonedDateTime.parse(datetime, dtf);" + )); + } + + public void testPiecesToZonedDateTime() { + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "int year = 1983;" + + "int month = 10;" + + "int day = 13;" + + "int hour = 22;" + + "int minutes = 15;" + + "int seconds = 30;" + + "int nanos = 0;" + + "String tz = 'Z';" + + "return ZonedDateTime.of(year, month, day, hour, minutes, seconds, nanos, ZoneId.of(tz));" + )); + } + + public void testZonedDatetimeToLong() { + assertEquals(434931330000L, exec( + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return zdt.toInstant().toEpochMilli();" + )); + } + + public void testZonedDateTimeToString() { + assertEquals("1983-10-13T22:15:30Z", exec( + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return zdt.format(DateTimeFormatter.ISO_INSTANT);" + )); + + assertEquals("date: 1983/10/13 time: 22:15:30", exec( + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "DateTimeFormatter dtf = DateTimeFormatter.ofPattern(" + + "\"'date:' yyyy/MM/dd 'time:' HH:mm:ss\");" + + "return zdt.format(dtf);" + )); + } + + public void testZonedDateTimeToPieces() { + assertArrayEquals(new int[] {1983, 10, 13, 22, 15, 30, 
100}, (int[])exec( + "int[] pieces = new int[7];" + + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 100, ZoneId.of('Z'));" + + "pieces[0] = zdt.year;" + + "pieces[1] = zdt.monthValue;" + + "pieces[2] = zdt.dayOfMonth;" + + "pieces[3] = zdt.hour;" + + "pieces[4] = zdt.minute;" + + "pieces[5] = zdt.second;" + + "pieces[6] = zdt.nano;" + + "return pieces;" + )); + } + + public void testLongManipulation() { + assertEquals(ZonedDateTime.of(1983, 10, 13, 22, 15, 27, 0, ZoneId.of("Z")), exec( + "long milliSinceEpoch = 434931330000L;" + + "milliSinceEpoch = milliSinceEpoch - 1000L*3L;" + + "Instant instant = Instant.ofEpochMilli(milliSinceEpoch);" + + "return ZonedDateTime.ofInstant(instant, ZoneId.of('Z'))" + )); + } + + public void testZonedDateTimeManipulation() { + assertEquals(ZonedDateTime.of(1983, 10, 16, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return zdt.plusDays(3);" + )); + + assertEquals(ZonedDateTime.of(1983, 10, 13, 20, 10, 30, 0, ZoneId.of("Z")), exec( + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return zdt.minusMinutes(125);" + )); + + assertEquals(ZonedDateTime.of(1976, 10, 13, 22, 15, 30, 0, ZoneId.of("Z")), exec( + "ZonedDateTime zdt = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "return zdt.withYear(1976);" + )); + } + + public void testLongTimeDifference() { + assertEquals(3000L, exec( + "long startTimestamp = 434931327000L;" + + "long endTimestamp = 434931330000L;" + + "return endTimestamp - startTimestamp;" + )); + } + + public void testZonedDateTimeDifference() { + assertEquals(4989L, exec( + "ZonedDateTime zdt1 = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 11000000, ZoneId.of('Z'));" + + "ZonedDateTime zdt2 = ZonedDateTime.of(1983, 10, 13, 22, 15, 35, 0, ZoneId.of('Z'));" + + "return ChronoUnit.MILLIS.between(zdt1, zdt2);" + )); + + assertEquals(4L, exec( + "ZonedDateTime zdt1 = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 11000000, ZoneId.of('Z'));" + + "ZonedDateTime zdt2 = ZonedDateTime.of(1983, 10, 17, 22, 15, 35, 0, ZoneId.of('Z'));" + + "return ChronoUnit.DAYS.between(zdt1, zdt2);" + )); + } + + public void compareLongs() { + assertEquals(false, exec( + "long ts1 = 434931327000L;" + + "long ts2 = 434931330000L;" + + "return ts1 > ts2;" + )); + } + + public void compareZonedDateTimes() { + assertEquals(true, exec( + "ZonedDateTime zdt1 = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "ZonedDateTime zdt2 = ZonedDateTime.of(1983, 10, 17, 22, 15, 35, 0, ZoneId.of('Z'));" + + "return zdt1.isBefore(zdt2);" + )); + + assertEquals(false, exec( + "ZonedDateTime zdt1 = ZonedDateTime.of(1983, 10, 13, 22, 15, 30, 0, ZoneId.of('Z'));" + + "ZonedDateTime zdt2 = ZonedDateTime.of(1983, 10, 17, 22, 15, 35, 0, ZoneId.of('Z'));" + + "return zdt1.isAfter(zdt2);" + )); + } +} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index c245e2cb3a20b..d3c4bdedde7d2 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.PrefixCodedTerms; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.CommonTermsQuery; import 
org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -75,7 +74,6 @@ final class QueryAnalyzer { entry(BoostQuery.class, boostQuery()), entry(TermQuery.class, termQuery()), entry(TermInSetQuery.class, termInSetQuery()), - entry(CommonTermsQuery.class, commonTermsQuery()), entry(BlendedTermQuery.class, blendedTermQuery()), entry(PhraseQuery.class, phraseQuery()), entry(MultiPhraseQuery.class, multiPhraseQuery()), @@ -185,13 +183,6 @@ private static BiFunction synonymQuery() { }; } - private static BiFunction commonTermsQuery() { - return (query, version) -> { - Set terms = ((CommonTermsQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(false, terms, Math.min(1, terms.size())); - }; - } - private static BiFunction blendedTermQuery() { return (query, version) -> { Set terms = ((BlendedTermQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index b191dd948c574..e487037afaea7 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -46,7 +46,6 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -84,8 +83,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.geo.ShapeRelation; +import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; @@ -530,12 +529,6 @@ public void testDuelIdBased() throws Exception { public void testDuelSpecificQueries() throws Exception { List documents = new ArrayList<>(); - CommonTermsQuery commonTermsQuery = new CommonTermsQuery(Occur.SHOULD, Occur.SHOULD, 128); - commonTermsQuery.add(new Term("field", "quick")); - commonTermsQuery.add(new Term("field", "brown")); - commonTermsQuery.add(new Term("field", "fox")); - addQuery(commonTermsQuery, documents); - BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(new Term[]{new Term("field", "quick"), new Term("field", "brown"), new Term("field", "fox")}, 1.0f); addQuery(blendedTermQuery, documents); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index ee31a81ae168a..57a6ca15ac593 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -44,7 +44,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder; import static 
org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery; import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery; import static org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery; @@ -356,13 +355,10 @@ public void testPercolatorSpecificQueries() throws Exception { ); client().prepareIndex("test", "type", "1") - .setSource(jsonBuilder().startObject().field("query", commonTermsQuery("field1", "quick brown fox")).endObject()) - .get(); - client().prepareIndex("test", "type", "2") .setSource(jsonBuilder().startObject().field("query", multiMatchQuery("quick brown fox", "field1", "field2") .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).endObject()) .get(); - client().prepareIndex("test", "type", "3") + client().prepareIndex("test", "type", "2") .setSource(jsonBuilder().startObject().field("query", spanNearQuery(spanTermQuery("field1", "quick"), 0) .addClause(spanTermQuery("field1", "brown")) @@ -372,7 +368,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); client().admin().indices().prepareRefresh().get(); - client().prepareIndex("test", "type", "4") + client().prepareIndex("test", "type", "3") .setSource(jsonBuilder().startObject().field("query", spanNotQuery( spanNearQuery(spanTermQuery("field1", "quick"), 0) @@ -387,7 +383,7 @@ public void testPercolatorSpecificQueries() throws Exception { .get(); // doesn't match - client().prepareIndex("test", "type", "5") + client().prepareIndex("test", "type", "4") .setSource(jsonBuilder().startObject().field("query", spanNotQuery( spanNearQuery(spanTermQuery("field1", "quick"), 0) @@ -410,15 +406,13 @@ public void testPercolatorSpecificQueries() throws Exception { .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) .addSort("_id", SortOrder.ASC) .get(); - assertHitCount(response, 4); + assertHitCount(response, 3); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); assertThat(response.getHits().getAt(0).getScore(), equalTo(Float.NaN)); assertThat(response.getHits().getAt(1).getId(), equalTo("2")); assertThat(response.getHits().getAt(1).getScore(), equalTo(Float.NaN)); assertThat(response.getHits().getAt(2).getId(), equalTo("3")); assertThat(response.getHits().getAt(2).getScore(), equalTo(Float.NaN)); - assertThat(response.getHits().getAt(3).getId(), equalTo("4")); - assertThat(response.getHits().getAt(3).getScore(), equalTo(Float.NaN)); } public void testPercolatorQueryWithHighlighting() throws Exception { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index c07467187f05f..358e9176e19b5 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -28,8 +28,8 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import 
org.apache.lucene.search.ConstantScoreQuery; @@ -44,7 +44,6 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; -import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.spans.SpanFirstQuery; @@ -520,27 +519,10 @@ public void testExtractQueryMetadata_boostQuery() { assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes())); } - public void testExtractQueryMetadata_commonTermsQuery() { - CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 100); - commonTermsQuery.add(new Term("_field", "_term1")); - commonTermsQuery.add(new Term("_field", "_term2")); - Result result = analyze(commonTermsQuery, Version.CURRENT); - assertThat(result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - List terms = new ArrayList<>(result.extractions); - terms.sort(Comparator.comparing(qt -> qt.term)); - assertThat(terms.size(), equalTo(2)); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertThat(terms.get(0).field(), equalTo("_field")); - assertThat(terms.get(0).text(), equalTo("_term1")); - assertThat(terms.get(1).field(), equalTo("_field")); - assertThat(terms.get(1).text(), equalTo("_term2")); - } - public void testExtractQueryMetadata_blendedTermQuery() { Term[] termsArr = new Term[]{new Term("_field", "_term1"), new Term("_field", "_term2")}; - BlendedTermQuery commonTermsQuery = BlendedTermQuery.dismaxBlendedQuery(termsArr, 1.0f); - Result result = analyze(commonTermsQuery, Version.CURRENT); + BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(termsArr, 1.0f); + Result result = analyze(blendedTermQuery, Version.CURRENT); assertThat(result.verified, is(true)); assertThat(result.minimumShouldMatch, equalTo(1)); List terms = new ArrayList<>(result.extractions); diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 33de853e839e3..78846e2d81dd5 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -54,8 +54,8 @@ test { } dependencies { - compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" - compile "org.elasticsearch:elasticsearch-ssl-config:${version}" + compile project(":client:rest") + compile project(":libs:elasticsearch-ssl-config") // for http - testing reindex from remote testCompile project(path: ':modules:transport-netty4', configuration: 'runtime') // for parent/child testing diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java index fcb8e75700d0c..664632cb527f8 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/discovery/Zen2RestApiIT.java @@ -47,7 +47,7 @@ // These tests are here today so they have access to a proper REST client. They cannot be in :server:integTest since the REST client needs a // proper transport implementation, and they cannot be REST tests today since they need to restart nodes. When #35599 and friends land we // should be able to move these tests to run against a proper cluster instead. TODO do this. 
-@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class Zen2RestApiIT extends ESNetty4IntegTestCase { @Override diff --git a/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishStopTokenFilterFactory.java b/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishStopTokenFilterFactory.java new file mode 100644 index 0000000000000..32897ad29d7b4 --- /dev/null +++ b/plugins/analysis-stempel/src/main/java/org/elasticsearch/index/analysis/pl/PolishStopTokenFilterFactory.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis.pl; + + +import org.apache.lucene.analysis.CharArraySet; +import org.apache.lucene.analysis.StopFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.pl.PolishAnalyzer; +import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.Analysis; + +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.singletonMap; + +public class PolishStopTokenFilterFactory extends AbstractTokenFilterFactory { + private static final Map<String, Set<?>> NAMED_STOP_WORDS = singletonMap("_polish_", PolishAnalyzer.getDefaultStopSet()); + + private final CharArraySet stopWords; + + private final boolean ignoreCase; + + private final boolean removeTrailing; + + public PolishStopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { + super(indexSettings, name, settings); + this.ignoreCase = settings.getAsBoolean("ignore_case", false); + this.removeTrailing = settings.getAsBoolean("remove_trailing", true); + this.stopWords = Analysis.parseWords(env, settings, "stopwords", + PolishAnalyzer.getDefaultStopSet(), NAMED_STOP_WORDS, ignoreCase); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + if (removeTrailing) { + return new StopFilter(tokenStream, stopWords); + } else { + return new SuggestStopFilter(tokenStream, stopWords); + } + } + + public Set<?> stopWords() { + return stopWords; + } + + public boolean ignoreCase() { + return ignoreCase; + } + +} diff --git a/plugins/analysis-stempel/src/main/java/org/elasticsearch/plugin/analysis/stempel/AnalysisStempelPlugin.java b/plugins/analysis-stempel/src/main/java/org/elasticsearch/plugin/analysis/stempel/AnalysisStempelPlugin.java index 
98dd9634fb961..a523d7dcaa0ce 100644 --- a/plugins/analysis-stempel/src/main/java/org/elasticsearch/plugin/analysis/stempel/AnalysisStempelPlugin.java +++ b/plugins/analysis-stempel/src/main/java/org/elasticsearch/plugin/analysis/stempel/AnalysisStempelPlugin.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.pl.PolishAnalyzerProvider; import org.elasticsearch.index.analysis.pl.PolishStemTokenFilterFactory; +import org.elasticsearch.index.analysis.pl.PolishStopTokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; @@ -35,7 +36,8 @@ public class AnalysisStempelPlugin extends Plugin implements AnalysisPlugin { @Override public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() { - return singletonMap("polish_stem", PolishStemTokenFilterFactory::new); + return Map.of("polish_stem", PolishStemTokenFilterFactory::new, + "polish_stop", PolishStopTokenFilterFactory::new); } @Override diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index 2b9f3c6433d8f..a251c1bbbe85d 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -7,3 +7,22 @@ gradle.projectsEvaluated { } } } + +configure(project('painless-whitelist')) { + configurations.all { + resolutionStrategy.dependencySubstitution { + substitute module('org.elasticsearch.plugin:elasticsearch-scripting-painless-spi') with project(':modules:lang-painless:spi') + substitute module('org.elasticsearch.test:logger-usage') with project(':test:logger-usage') + } + } +} + +configure(project('security-authorization-engine')) { + configurations.all { + resolutionStrategy.dependencySubstitution { + substitute module('org.elasticsearch.plugin:x-pack-core') with project(':x-pack:plugin:core') + substitute module('org.elasticsearch.client:elasticsearch-rest-high-level-client') with project(':client:rest-high-level') + substitute module('org.elasticsearch.test:logger-usage') with project(':test:logger-usage') + } + } +} \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index dd9bccf3799d0..bf9b42d0558b2 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -24,7 +24,7 @@ esplugin { } dependencies { - compile "org.elasticsearch:elasticsearch-nio:${version}" + compile project(':libs:elasticsearch-nio') // network stack compile "io.netty:netty-buffer:${versions.netty}" diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle index c1f2bc9627108..ea80ee983b876 100644 --- a/qa/ccs-unavailable-clusters/build.gradle +++ b/qa/ccs-unavailable-clusters/build.gradle @@ -21,5 +21,5 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test-with-dependencies' dependencies { - testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" + testCompile project(":client:rest-high-level") } diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index bca12be6754ab..7f923d03f7166 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -22,7 +22,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" + testCompile project(":client:rest-high-level") } task 
remoteClusterTest(type: RestIntegTestTask) { diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index 7c342436dd3ca..f5cfcdda03cc6 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -35,7 +35,7 @@ dependencies { compile "commons-codec:commons-codec:${versions.commonscodec}" compile "commons-logging:commons-logging:${versions.commonslogging}" - compile project(':libs:core') + compile project(':libs:elasticsearch-core') // pulls in the jar built by this project and its dependencies packagingTest project(path: project.path, configuration: 'runtime') diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index bcb55079b8269..ded171ce24cf0 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -70,9 +70,9 @@ dependencies { compile "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" compile "org.apache.logging.log4j:log4j-core:${versions.log4j}" - compile project(path: ':client:rest-high-level') + compile project(':client:rest-high-level') wildfly "org.jboss:wildfly:${wildflyVersion}@zip" - testCompile "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}" + testCompile project(':test:framework') } task unzipWildfly(type: Sync) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml index aa6a5158b4795..f92b0ffda80e3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml @@ -345,19 +345,3 @@ setup: type: bool_prefix fields: [ "my_field1", "my_field2" ] slop: 1 - ---- -"multi_match multiple fields with cutoff_frequency throws exception": - - - do: - catch: /\[cutoff_frequency\] not allowed for type \[bool_prefix\]/ - search: - rest_total_hits_as_int: true - index: test - body: - query: - multi_match: - query: "brown" - type: bool_prefix - fields: [ "my_field1", "my_field2" ] - cutoff_frequency: 0.001 diff --git a/server/build.gradle b/server/build.gradle index d05d4a9f01c08..15aff884d3abe 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -74,13 +74,13 @@ if (!isEclipse && !isIdea) { dependencies { - compile "org.elasticsearch:elasticsearch-core:${version}" - compile "org.elasticsearch:elasticsearch-secure-sm:${version}" - compile "org.elasticsearch:elasticsearch-x-content:${version}" - compile "org.elasticsearch:elasticsearch-geo:${version}" + compile project(':libs:elasticsearch-core') + compile project(':libs:elasticsearch-secure-sm') + compile project(':libs:elasticsearch-x-content') + compile project(":libs:elasticsearch-geo") - compileOnly project(':libs:plugin-classloader') - testRuntime project(':libs:plugin-classloader') + compileOnly project(':libs:elasticsearch-plugin-classloader') + testRuntime project(':libs:elasticsearch-plugin-classloader') // lucene compile "org.apache.lucene:lucene-core:${versions.lucene}" @@ -100,7 +100,7 @@ dependencies { compile "org.apache.lucene:lucene-suggest:${versions.lucene}" // utilities - compile "org.elasticsearch:elasticsearch-cli:${version}" + compile project(":libs:elasticsearch-cli") compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time @@ -127,9 +127,9 @@ dependencies { } if (isEclipse == false || project.path == ":server-tests") { - 
testCompile("org.elasticsearch.test:framework:${version}") { + testCompile(project(":test:framework")) { // tests use the locally compiled version of server - exclude group: 'org.elasticsearch', module: 'elasticsearch' + exclude group: 'org.elasticsearch', module: 'server' } } } diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index f823f3a142690..5f00631ad6028 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -22,11 +22,8 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermStates; import org.apache.lucene.index.TermState; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; @@ -278,50 +275,6 @@ public int hashCode() { return Objects.hash(classHash(), Arrays.hashCode(equalsTerms())); } - /** - * @deprecated Since max_score optimization landed in 7.0, normal MultiMatchQuery - * will achieve the same result without any configuration. - */ - @Deprecated - public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) { - return new BlendedTermQuery(terms, boosts) { - @Override - protected Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc) { - BooleanQuery.Builder highBuilder = new BooleanQuery.Builder(); - BooleanQuery.Builder lowBuilder = new BooleanQuery.Builder(); - for (int i = 0; i < terms.length; i++) { - Query query = new TermQuery(terms[i], ctx[i]); - if (boosts != null && boosts[i] != 1f) { - query = new BoostQuery(query, boosts[i]); - } - if ((maxTermFrequency >= 1f && docFreqs[i] > maxTermFrequency) - || (docFreqs[i] > (int) Math.ceil(maxTermFrequency - * maxDoc))) { - highBuilder.add(query, BooleanClause.Occur.SHOULD); - } else { - lowBuilder.add(query, BooleanClause.Occur.SHOULD); - } - } - BooleanQuery high = highBuilder.build(); - BooleanQuery low = lowBuilder.build(); - if (low.clauses().isEmpty()) { - BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); - for (BooleanClause booleanClause : high) { - queryBuilder.add(booleanClause.getQuery(), Occur.MUST); - } - return queryBuilder.build(); - } else if (high.clauses().isEmpty()) { - return low; - } else { - return new BooleanQuery.Builder() - .add(high, BooleanClause.Occur.SHOULD) - .add(low, BooleanClause.Occur.MUST) - .build(); - } - } - }; - } - public static BlendedTermQuery dismaxBlendedQuery(Term[] terms, final float tieBreakerMultiplier) { return dismaxBlendedQuery(terms, null, tieBreakerMultiplier); } diff --git a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java b/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java deleted file mode 100644 index 2d70ed8b90a05..0000000000000 --- a/server/src/main/java/org/apache/lucene/queries/ExtendedCommonTermsQuery.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.queries; - -import org.apache.lucene.search.BooleanClause.Occur; -import org.elasticsearch.common.lucene.search.Queries; - -/** - * Extended version of {@link CommonTermsQuery} that allows to pass in a - * {@code minimumNumberShouldMatch} specification that uses the actual num of high frequent terms - * to calculate the minimum matching terms. - * - * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery - * will achieve the same result without any configuration. - */ -@Deprecated -public class ExtendedCommonTermsQuery extends CommonTermsQuery { - - public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency) { - super(highFreqOccur, lowFreqOccur, maxTermFrequency); - } - - private String lowFreqMinNumShouldMatchSpec; - private String highFreqMinNumShouldMatchSpec; - - @Override - protected int calcLowFreqMinimumNumberShouldMatch(int numOptional) { - return calcMinimumNumberShouldMatch(lowFreqMinNumShouldMatchSpec, numOptional); - } - - protected int calcMinimumNumberShouldMatch(String spec, int numOptional) { - if (spec == null) { - return 0; - } - return Queries.calculateMinShouldMatch(numOptional, spec); - } - - @Override - protected int calcHighFreqMinimumNumberShouldMatch(int numOptional) { - return calcMinimumNumberShouldMatch(highFreqMinNumShouldMatchSpec, numOptional); - } - - public void setHighFreqMinimumNumberShouldMatch(String spec) { - this.highFreqMinNumShouldMatchSpec = spec; - } - - public String getHighFreqMinimumNumberShouldMatchSpec() { - return highFreqMinNumShouldMatchSpec; - } - - public void setLowFreqMinimumNumberShouldMatch(String spec) { - this.lowFreqMinNumShouldMatchSpec = spec; - } - - public String getLowFreqMinimumNumberShouldMatchSpec() { - return lowFreqMinNumShouldMatchSpec; - } - - public float getMaxTermFrequency() { - return this.maxTermFrequency; - } - -} diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 260b443a6a557..51662414e0d07 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1027,7 +1027,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, 155, - Version.V_6_8_1); + UNKNOWN_VERSION_ADDED); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index ec79c17a834f1..18dcd192efa72 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ 
b/server/src/main/java/org/elasticsearch/Version.java @@ -46,10 +46,6 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final int V_6_8_0_ID = 6080099; - public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_8_1_ID = 6080199; - public static final Version V_6_8_1 = new Version(V_6_8_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; @@ -96,10 +92,6 @@ public static Version fromId(int id) { return V_7_0_1; case V_7_0_0_ID: return V_7_0_0; - case V_6_8_1_ID: - return V_6_8_1; - case V_6_8_0_ID: - return V_6_8_0; case V_EMPTY_ID: return V_EMPTY; default: @@ -283,8 +275,11 @@ public Version minimumCompatibilityVersion() { if (major == 6) { // force the minimum compatibility for version 6 to 5.6 since we don't reference version 5 anymore return Version.fromId(5060099); - } else if (major >= 7) { - // all major versions from 7 onwards are compatible with last minor series of the previous major + } else if (major == 7) { + // force the minimum compatibility for version 7 to 6.8 since we don't reference version 6 anymore + return Version.fromId(6080099); + } else if (major >= 8) { + // all major versions from 8 onwards are compatible with last minor series of the previous major Version bwcVersion = null; for (int i = DeclaredVersionsHolder.DECLARED_VERSIONS.size() - 1; i >= 0; i--) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index 3677cd6cb4e43..6dfa4bf4c4459 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -20,9 +20,35 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.single.shard.SingleShardRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; -public class AnalyzeAction extends Action { +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; + +import static 
org.elasticsearch.action.ValidateActions.addValidationError; + +public class AnalyzeAction extends Action { public static final AnalyzeAction INSTANCE = new AnalyzeAction(); public static final String NAME = "indices:admin/analyze"; @@ -32,12 +58,814 @@ private AnalyzeAction() { } @Override - public Writeable.Reader getResponseReader() { - return AnalyzeResponse::new; + public Writeable.Reader getResponseReader() { + return Response::new; } @Override - public AnalyzeResponse newResponse() { + public Response newResponse() { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } + + /** + * A request to analyze a text associated with a specific index. Allow to provide + * the actual analyzer name to perform the analysis with. + */ + public static class Request extends SingleShardRequest { + + private String[] text; + private String analyzer; + private NameOrDefinition tokenizer; + private final List tokenFilters = new ArrayList<>(); + private final List charFilters = new ArrayList<>(); + private String field; + private boolean explain = false; + private String[] attributes = Strings.EMPTY_ARRAY; + private String normalizer; + + public static class NameOrDefinition implements Writeable { + // exactly one of these two members is not null + public final String name; + public final Settings definition; + + NameOrDefinition(String name) { + this.name = Objects.requireNonNull(name); + this.definition = null; + } + + NameOrDefinition(Map definition) { + this.name = null; + Objects.requireNonNull(definition); + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(definition); + this.definition = Settings.builder().loadFromSource(Strings.toString(builder), builder.contentType()).build(); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse [" + definition + "]", e); + } + } + + NameOrDefinition(StreamInput in) throws IOException { + name = in.readOptionalString(); + if (in.readBoolean()) { + definition = Settings.readSettingsFromStream(in); + } else { + definition = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(name); + boolean isNotNullDefinition = this.definition != null; + out.writeBoolean(isNotNullDefinition); + if (isNotNullDefinition) { + Settings.writeSettingsToStream(definition, out); + } + } + + public static NameOrDefinition fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new NameOrDefinition(parser.text()); + } + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + return new NameOrDefinition(parser.map()); + } + throw new XContentParseException(parser.getTokenLocation(), + "Expected [VALUE_STRING] or [START_OBJECT], got " + parser.currentToken()); + } + + } + + public Request() { + } + + /** + * Constructs a new analyzer request for the provided index. + * + * @param index The text to analyze + */ + public Request(String index) { + this.index(index); + } + + public String[] text() { + return this.text; + } + + public Request text(String... 
text) { + this.text = text; + return this; + } + + public Request text(List text) { + this.text = text.toArray(new String[]{}); + return this; + } + + public Request analyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + public String analyzer() { + return this.analyzer; + } + + public Request tokenizer(String tokenizer) { + this.tokenizer = new NameOrDefinition(tokenizer); + return this; + } + + public Request tokenizer(Map tokenizer) { + this.tokenizer = new NameOrDefinition(tokenizer); + return this; + } + + public void tokenizer(NameOrDefinition tokenizer) { + this.tokenizer = tokenizer; + } + + public NameOrDefinition tokenizer() { + return this.tokenizer; + } + + public Request addTokenFilter(String tokenFilter) { + this.tokenFilters.add(new NameOrDefinition(tokenFilter)); + return this; + } + + public Request addTokenFilter(Map tokenFilter) { + this.tokenFilters.add(new NameOrDefinition(tokenFilter)); + return this; + } + + public void setTokenFilters(List tokenFilters) { + this.tokenFilters.addAll(tokenFilters); + } + + public List tokenFilters() { + return this.tokenFilters; + } + + public Request addCharFilter(Map charFilter) { + this.charFilters.add(new NameOrDefinition(charFilter)); + return this; + } + + public Request addCharFilter(String charFilter) { + this.charFilters.add(new NameOrDefinition(charFilter)); + return this; + } + + public void setCharFilters(List charFilters) { + this.charFilters.addAll(charFilters); + } + + public List charFilters() { + return this.charFilters; + } + + public Request field(String field) { + this.field = field; + return this; + } + + public String field() { + return this.field; + } + + public Request explain(boolean explain) { + this.explain = explain; + return this; + } + + public boolean explain() { + return this.explain; + } + + public Request attributes(String... 
attributes) { + if (attributes == null) { + throw new IllegalArgumentException("attributes must not be null"); + } + this.attributes = attributes; + return this; + } + + public void attributes(List attributes) { + this.attributes = attributes.toArray(new String[]{}); + } + + public String[] attributes() { + return this.attributes; + } + + public String normalizer() { + return this.normalizer; + } + + public Request normalizer(String normalizer) { + this.normalizer = normalizer; + return this; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (text == null || text.length == 0) { + validationException = addValidationError("text is missing", validationException); + } + if ((index == null || index.length() == 0) && normalizer != null) { + validationException = addValidationError("index is required if normalizer is specified", validationException); + } + if (normalizer != null && (tokenizer != null || analyzer != null)) { + validationException + = addValidationError("tokenizer/analyze should be null if normalizer is specified", validationException); + } + if (analyzer != null && (tokenizer != null || charFilters.isEmpty() == false || tokenFilters.isEmpty() == false)) { + validationException + = addValidationError("cannot define extra components on a named analyzer", validationException); + } + if (normalizer != null && (tokenizer != null || charFilters.isEmpty() == false || tokenFilters.isEmpty() == false)) { + validationException + = addValidationError("cannot define extra components on a named normalizer", validationException); + } + if (field != null && (tokenizer != null || charFilters.isEmpty() == false || tokenFilters.isEmpty() == false)) { + validationException + = addValidationError("cannot define extra components on a field-specific analyzer", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + text = in.readStringArray(); + analyzer = in.readOptionalString(); + tokenizer = in.readOptionalWriteable(NameOrDefinition::new); + tokenFilters.addAll(in.readList(NameOrDefinition::new)); + charFilters.addAll(in.readList(NameOrDefinition::new)); + field = in.readOptionalString(); + explain = in.readBoolean(); + attributes = in.readStringArray(); + normalizer = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(text); + out.writeOptionalString(analyzer); + out.writeOptionalWriteable(tokenizer); + out.writeList(tokenFilters); + out.writeList(charFilters); + out.writeOptionalString(field); + out.writeBoolean(explain); + out.writeStringArray(attributes); + out.writeOptionalString(normalizer); + } + + public static Request fromXContent(XContentParser parser, String index) throws IOException { + Request request = new Request(index); + PARSER.parse(parser, request, null); + return request; + } + + private static final ObjectParser PARSER = new ObjectParser<>("analyze_request", null); + static { + PARSER.declareStringArray(Request::text, new ParseField("text")); + PARSER.declareString(Request::analyzer, new ParseField("analyzer")); + PARSER.declareField(Request::tokenizer, (p, c) -> NameOrDefinition.fromXContent(p), + new ParseField("tokenizer"), ObjectParser.ValueType.OBJECT_OR_STRING); + PARSER.declareObjectArray(Request::setTokenFilters, (p, c) -> NameOrDefinition.fromXContent(p), + new ParseField("filter")); 
+ PARSER.declareObjectArray(Request::setCharFilters, (p, c) -> NameOrDefinition.fromXContent(p), + new ParseField("char_filter")); + PARSER.declareString(Request::field, new ParseField("field")); + PARSER.declareBoolean(Request::explain, new ParseField("explain")); + PARSER.declareStringArray(Request::attributes, new ParseField("attributes")); + PARSER.declareString(Request::normalizer, new ParseField("normalizer")); + } + + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private final DetailAnalyzeResponse detail; + private final List tokens; + + public Response(List tokens, DetailAnalyzeResponse detail) { + this.tokens = tokens; + this.detail = detail; + } + + public Response(StreamInput in) throws IOException { + super.readFrom(in); + int size = in.readVInt(); + if (size > 0) { + tokens = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + tokens.add(new AnalyzeToken(in)); + } + } + else { + tokens = null; + } + detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); + } + + public List getTokens() { + return this.tokens; + } + + public DetailAnalyzeResponse detail() { + return this.detail; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (tokens != null) { + builder.startArray(Fields.TOKENS); + for (AnalyzeToken token : tokens) { + token.toXContent(builder, params); + } + builder.endArray(); + } + + if (detail != null) { + builder.startObject(Fields.DETAIL); + detail.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (tokens != null) { + out.writeVInt(tokens.size()); + for (AnalyzeToken token : tokens) { + token.writeTo(out); + } + } else { + out.writeVInt(0); + } + out.writeOptionalWriteable(detail); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response that = (Response) o; + return Objects.equals(detail, that.detail) && + Objects.equals(tokens, that.tokens); + } + + @Override + public int hashCode() { + return Objects.hash(detail, tokens); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + static final class Fields { + static final String TOKENS = "tokens"; + + static final String DETAIL = "detail"; + } + } + + public static class AnalyzeToken implements Writeable, ToXContentObject { + private final String term; + private final int startOffset; + private final int endOffset; + private final int position; + private final int positionLength; + private final Map attributes; + private final String type; + + static final String TOKEN = "token"; + static final String START_OFFSET = "start_offset"; + static final String END_OFFSET = "end_offset"; + static final String TYPE = "type"; + static final String POSITION = "position"; + static final String POSITION_LENGTH = "positionLength"; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalyzeToken that = (AnalyzeToken) o; + return startOffset == that.startOffset && + endOffset == that.endOffset && + position == that.position && + 
positionLength == that.positionLength && + Objects.equals(term, that.term) && + Objects.equals(attributes, that.attributes) && + Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + return Objects.hash(term, startOffset, endOffset, position, positionLength, attributes, type); + } + + public AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength, + String type, Map attributes) { + this.term = term; + this.position = position; + this.startOffset = startOffset; + this.endOffset = endOffset; + this.positionLength = positionLength; + this.type = type; + this.attributes = attributes; + } + + AnalyzeToken(StreamInput in) throws IOException { + term = in.readString(); + startOffset = in.readInt(); + endOffset = in.readInt(); + position = in.readVInt(); + Integer len = in.readOptionalVInt(); + if (len != null) { + positionLength = len; + } else { + positionLength = 1; + } + type = in.readOptionalString(); + attributes = in.readMap(); + } + + public String getTerm() { + return this.term; + } + + public int getStartOffset() { + return this.startOffset; + } + + public int getEndOffset() { + return this.endOffset; + } + + public int getPosition() { + return this.position; + } + + public int getPositionLength() { + return this.positionLength; + } + + public String getType() { + return this.type; + } + + public Map getAttributes(){ + return this.attributes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOKEN, term); + builder.field(START_OFFSET, startOffset); + builder.field(END_OFFSET, endOffset); + builder.field(TYPE, type); + builder.field(POSITION, position); + if (positionLength > 1) { + builder.field(POSITION_LENGTH, positionLength); + } + if (attributes != null && !attributes.isEmpty()) { + Map sortedAttributes = new TreeMap<>(attributes); + for (Map.Entry entity : sortedAttributes.entrySet()) { + builder.field(entity.getKey(), entity.getValue()); + } + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(term); + out.writeInt(startOffset); + out.writeInt(endOffset); + out.writeVInt(position); + out.writeOptionalVInt(positionLength > 1 ? 
positionLength : null); + out.writeOptionalString(type); + out.writeMapWithConsistentOrder(attributes); + } + } + + public static class DetailAnalyzeResponse implements Writeable, ToXContentFragment { + + private final boolean customAnalyzer; + private final AnalyzeTokenList analyzer; + private final CharFilteredText[] charfilters; + private final AnalyzeTokenList tokenizer; + private final AnalyzeTokenList[] tokenfilters; + + public DetailAnalyzeResponse(AnalyzeTokenList analyzer) { + this(false, analyzer, null, null, null); + } + + public DetailAnalyzeResponse(CharFilteredText[] charfilters, AnalyzeTokenList tokenizer, AnalyzeTokenList[] tokenfilters) { + this(true, null, charfilters, tokenizer, tokenfilters); + } + + DetailAnalyzeResponse(boolean customAnalyzer, + AnalyzeTokenList analyzer, + CharFilteredText[] charfilters, + AnalyzeTokenList tokenizer, + AnalyzeTokenList[] tokenfilters) { + this.customAnalyzer = customAnalyzer; + this.analyzer = analyzer; + this.charfilters = charfilters; + this.tokenizer = tokenizer; + this.tokenfilters = tokenfilters; + } + + DetailAnalyzeResponse(StreamInput in) throws IOException { + this.customAnalyzer = in.readBoolean(); + if (customAnalyzer) { + tokenizer = new AnalyzeTokenList(in); + int size = in.readVInt(); + if (size > 0) { + charfilters = new CharFilteredText[size]; + for (int i = 0; i < size; i++) { + charfilters[i] = new CharFilteredText(in); + } + } else { + charfilters = null; + } + size = in.readVInt(); + if (size > 0) { + tokenfilters = new AnalyzeTokenList[size]; + for (int i = 0; i < size; i++) { + tokenfilters[i] = new AnalyzeTokenList(in); + } + } else { + tokenfilters = null; + } + analyzer = null; + } else { + analyzer = new AnalyzeTokenList(in); + tokenfilters = null; + tokenizer = null; + charfilters = null; + } + } + + public AnalyzeTokenList analyzer() { + return this.analyzer; + } + + public CharFilteredText[] charfilters() { + return this.charfilters; + } + + public AnalyzeTokenList tokenizer() { + return tokenizer; + } + + public AnalyzeTokenList[] tokenfilters() { + return tokenfilters; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DetailAnalyzeResponse that = (DetailAnalyzeResponse) o; + return customAnalyzer == that.customAnalyzer && + Objects.equals(analyzer, that.analyzer) && + Arrays.equals(charfilters, that.charfilters) && + Objects.equals(tokenizer, that.tokenizer) && + Arrays.equals(tokenfilters, that.tokenfilters); + } + + @Override + public int hashCode() { + int result = Objects.hash(customAnalyzer, analyzer, tokenizer); + result = 31 * result + Arrays.hashCode(charfilters); + result = 31 * result + Arrays.hashCode(tokenfilters); + return result; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("custom_analyzer", customAnalyzer); + + if (analyzer != null) { + builder.startObject("analyzer"); + analyzer.toXContentWithoutObject(builder, params); + builder.endObject(); + } + + if (charfilters != null) { + builder.startArray("charfilters"); + for (CharFilteredText charfilter : charfilters) { + charfilter.toXContent(builder, params); + } + builder.endArray(); + } + + if (tokenizer != null) { + builder.startObject("tokenizer"); + tokenizer.toXContentWithoutObject(builder, params); + builder.endObject(); + } + + if (tokenfilters != null) { + builder.startArray("tokenfilters"); + for (AnalyzeTokenList tokenfilter : tokenfilters) { + 
tokenfilter.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(customAnalyzer); + if (customAnalyzer) { + tokenizer.writeTo(out); + if (charfilters != null) { + out.writeVInt(charfilters.length); + for (CharFilteredText charfilter : charfilters) { + charfilter.writeTo(out); + } + } else { + out.writeVInt(0); + } + if (tokenfilters != null) { + out.writeVInt(tokenfilters.length); + for (AnalyzeTokenList tokenfilter : tokenfilters) { + tokenfilter.writeTo(out); + } + } else { + out.writeVInt(0); + } + } else { + analyzer.writeTo(out); + } + } + } + + public static class AnalyzeTokenList implements Writeable, ToXContentObject { + private final String name; + private final AnalyzeToken[] tokens; + + static final String NAME = "name"; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalyzeTokenList that = (AnalyzeTokenList) o; + return Objects.equals(name, that.name) && + Arrays.equals(tokens, that.tokens); + } + + @Override + public int hashCode() { + int result = Objects.hash(name); + result = 31 * result + Arrays.hashCode(tokens); + return result; + } + + public AnalyzeTokenList(String name, AnalyzeToken[] tokens) { + this.name = name; + this.tokens = tokens; + } + + AnalyzeTokenList(StreamInput in) throws IOException { + name = in.readString(); + int size = in.readVInt(); + if (size > 0) { + tokens = new AnalyzeToken[size]; + for (int i = 0; i < size; i++) { + tokens[i] = new AnalyzeToken(in); + } + } + else { + tokens = null; + } + } + + public String getName() { + return name; + } + + public AnalyzeToken[] getTokens() { + return tokens; + } + + void toXContentWithoutObject(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME, this.name); + builder.startArray(Response.Fields.TOKENS); + if (tokens != null) { + for (AnalyzeToken token : tokens) { + token.toXContent(builder, params); + } + } + builder.endArray(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + toXContentWithoutObject(builder, params); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + if (tokens != null) { + out.writeVInt(tokens.length); + for (AnalyzeToken token : tokens) { + token.writeTo(out); + } + } else { + out.writeVInt(0); + } + } + } + + public static class CharFilteredText implements Writeable, ToXContentObject { + private final String name; + private final String[] texts; + + static final String NAME = "name"; + static final String FILTERED_TEXT = "filtered_text"; + + public CharFilteredText(String name, String[] texts) { + this.name = name; + if (texts != null) { + this.texts = texts; + } else { + this.texts = Strings.EMPTY_ARRAY; + } + } + + CharFilteredText(StreamInput in) throws IOException { + name = in.readString(); + texts = in.readStringArray(); + } + + public String getName() { + return name; + } + + public String[] getTexts() { + return texts; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NAME, name); + builder.array(FILTERED_TEXT, texts); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + 
out.writeString(name); + out.writeStringArray(texts); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CharFilteredText that = (CharFilteredText) o; + return Objects.equals(name, that.name) && + Arrays.equals(texts, that.texts); + } + + @Override + public int hashCode() { + int result = Objects.hash(name); + result = 31 * result + Arrays.hashCode(texts); + return result; + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java deleted file mode 100644 index a2712c2d4c107..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.indices.analyze; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.single.shard.SingleShardRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * A request to analyze a text associated with a specific index. Allow to provide - * the actual analyzer name to perform the analysis with. 
- */ -public class AnalyzeRequest extends SingleShardRequest implements ToXContentObject { - - private String[] text; - - private String analyzer; - - private NameOrDefinition tokenizer; - - private final List tokenFilters = new ArrayList<>(); - - private final List charFilters = new ArrayList<>(); - - private String field; - - private boolean explain = false; - - private String[] attributes = Strings.EMPTY_ARRAY; - - private String normalizer; - - public static class NameOrDefinition implements Writeable, ToXContentFragment { - // exactly one of these two members is not null - public final String name; - public final Settings definition; - - NameOrDefinition(String name) { - this.name = Objects.requireNonNull(name); - this.definition = null; - } - - NameOrDefinition(Map definition) { - this.name = null; - Objects.requireNonNull(definition); - try { - XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); - builder.map(definition); - this.definition = Settings.builder().loadFromSource(Strings.toString(builder), builder.contentType()).build(); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to parse [" + definition + "]", e); - } - } - - NameOrDefinition(StreamInput in) throws IOException { - name = in.readOptionalString(); - if (in.readBoolean()) { - definition = Settings.readSettingsFromStream(in); - } else { - definition = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(name); - boolean isNotNullDefinition = this.definition != null; - out.writeBoolean(isNotNullDefinition); - if (isNotNullDefinition) { - Settings.writeSettingsToStream(definition, out); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (definition == null) { - return builder.value(name); - } - return definition.toXContent(builder, params); - } - - } - - public AnalyzeRequest() { - } - - /** - * Constructs a new analyzer request for the provided index. - * - * @param index The text to analyze - */ - public AnalyzeRequest(String index) { - this.index(index); - } - - public String[] text() { - return this.text; - } - - public AnalyzeRequest text(String... 
text) { - this.text = text; - return this; - } - - public AnalyzeRequest analyzer(String analyzer) { - this.analyzer = analyzer; - return this; - } - - public String analyzer() { - return this.analyzer; - } - - public AnalyzeRequest tokenizer(String tokenizer) { - this.tokenizer = new NameOrDefinition(tokenizer); - return this; - } - - public AnalyzeRequest tokenizer(Map tokenizer) { - this.tokenizer = new NameOrDefinition(tokenizer); - return this; - } - - public NameOrDefinition tokenizer() { - return this.tokenizer; - } - - public AnalyzeRequest addTokenFilter(String tokenFilter) { - this.tokenFilters.add(new NameOrDefinition(tokenFilter)); - return this; - } - - public AnalyzeRequest addTokenFilter(Map tokenFilter) { - this.tokenFilters.add(new NameOrDefinition(tokenFilter)); - return this; - } - - public List tokenFilters() { - return this.tokenFilters; - } - - public AnalyzeRequest addCharFilter(Map charFilter) { - this.charFilters.add(new NameOrDefinition(charFilter)); - return this; - } - - public AnalyzeRequest addCharFilter(String charFilter) { - this.charFilters.add(new NameOrDefinition(charFilter)); - return this; - } - - public List charFilters() { - return this.charFilters; - } - - public AnalyzeRequest field(String field) { - this.field = field; - return this; - } - - public String field() { - return this.field; - } - - public AnalyzeRequest explain(boolean explain) { - this.explain = explain; - return this; - } - - public boolean explain() { - return this.explain; - } - - public AnalyzeRequest attributes(String... attributes) { - if (attributes == null) { - throw new IllegalArgumentException("attributes must not be null"); - } - this.attributes = attributes; - return this; - } - - public String[] attributes() { - return this.attributes; - } - - public String normalizer() { - return this.normalizer; - } - - public AnalyzeRequest normalizer(String normalizer) { - this.normalizer = normalizer; - return this; - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = null; - if (text == null || text.length == 0) { - validationException = addValidationError("text is missing", validationException); - } - if ((index == null || index.length() == 0) && normalizer != null) { - validationException = addValidationError("index is required if normalizer is specified", validationException); - } - if (normalizer != null && (tokenizer != null || analyzer != null)) { - validationException = addValidationError("tokenizer/analyze should be null if normalizer is specified", validationException); - } - return validationException; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - text = in.readStringArray(); - analyzer = in.readOptionalString(); - tokenizer = in.readOptionalWriteable(NameOrDefinition::new); - tokenFilters.addAll(in.readList(NameOrDefinition::new)); - charFilters.addAll(in.readList(NameOrDefinition::new)); - field = in.readOptionalString(); - explain = in.readBoolean(); - attributes = in.readStringArray(); - normalizer = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(text); - out.writeOptionalString(analyzer); - out.writeOptionalWriteable(tokenizer); - out.writeList(tokenFilters); - out.writeList(charFilters); - out.writeOptionalString(field); - out.writeBoolean(explain); - out.writeStringArray(attributes); - out.writeOptionalString(normalizer); - } - - 
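// Usage sketch for the request class deleted here (its AnalyzeAction.Request replacement keeps the
// same fluent surface, as the builder change below shows). The index, analyzer and normalizer names
// are examples; validate() encodes the rules listed above.
static void buildAnalyzeRequests() {
    AnalyzeRequest byAnalyzer = new AnalyzeRequest("my-index")
            .text("The quick brown fox")
            .analyzer("standard")
            .explain(true);
    assert byAnalyzer.validate() == null;             // text present, no conflicting options

    AnalyzeRequest normalizerWithoutIndex = new AnalyzeRequest()
            .text("Quick Brown Fox")
            .normalizer("my_lowercase_normalizer");
    assert normalizerWithoutIndex.validate() != null; // "index is required if normalizer is specified"
}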
@Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("text", text); - if (Strings.isNullOrEmpty(analyzer) == false) { - builder.field("analyzer", analyzer); - } - if (tokenizer != null) { - tokenizer.toXContent(builder, params); - } - if (tokenFilters.size() > 0) { - builder.field("filter", tokenFilters); - } - if (charFilters.size() > 0) { - builder.field("char_filter", charFilters); - } - if (Strings.isNullOrEmpty(field) == false) { - builder.field("field", field); - } - if (explain) { - builder.field("explain", true); - } - if (attributes.length > 0) { - builder.field("attributes", attributes); - } - if (Strings.isNullOrEmpty(normalizer) == false) { - builder.field("normalizer", normalizer); - } - return builder.endObject(); - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java index 3893cb25d9dbb..2bd1724c5e69f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java @@ -23,14 +23,15 @@ import java.util.Map; -public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder { +public class AnalyzeRequestBuilder + extends SingleShardOperationRequestBuilder { public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action) { - super(client, action, new AnalyzeRequest()); + super(client, action, new AnalyzeAction.Request()); } public AnalyzeRequestBuilder(ElasticsearchClient client, AnalyzeAction action, String index, String... text) { - super(client, action, new AnalyzeRequest(index).text(text)); + super(client, action, new AnalyzeAction.Request(index).text(text)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java deleted file mode 100644 index 7e6d525cefb93..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
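// For reference, the toXContent() above renders the request roughly as the body accepted by the
// _analyze REST API (hand-written illustration; optional fields appear only when set, and not all
// combinations shown here pass validate(), e.g. analyzer together with normalizer):
//
//   {
//     "text"        : ["The quick brown fox"],
//     "analyzer"    : "standard",
//     "filter"      : ["lowercase"],
//     "char_filter" : ["html_strip"],
//     "field"       : "title",
//     "explain"     : true,
//     "attributes"  : ["keyword"],
//     "normalizer"  : "my_normalizer"
//   }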
- */ -package org.elasticsearch.action.admin.indices.analyze; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.TreeMap; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -public class AnalyzeResponse extends ActionResponse implements Iterable, ToXContentObject { - - public static class AnalyzeToken implements Writeable, ToXContentObject { - private final String term; - private final int startOffset; - private final int endOffset; - private final int position; - private final int positionLength; - private final Map attributes; - private final String type; - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - AnalyzeToken that = (AnalyzeToken) o; - return startOffset == that.startOffset && - endOffset == that.endOffset && - position == that.position && - positionLength == that.positionLength && - Objects.equals(term, that.term) && - Objects.equals(attributes, that.attributes) && - Objects.equals(type, that.type); - } - - @Override - public int hashCode() { - return Objects.hash(term, startOffset, endOffset, position, positionLength, attributes, type); - } - - AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength, - String type, Map attributes) { - this.term = term; - this.position = position; - this.startOffset = startOffset; - this.endOffset = endOffset; - this.positionLength = positionLength; - this.type = type; - this.attributes = attributes; - } - - AnalyzeToken(StreamInput in) throws IOException { - term = in.readString(); - startOffset = in.readInt(); - endOffset = in.readInt(); - position = in.readVInt(); - Integer len = in.readOptionalVInt(); - if (len != null) { - positionLength = len; - } else { - positionLength = 1; - } - type = in.readOptionalString(); - attributes = in.readMap(); - } - - public String getTerm() { - return this.term; - } - - public int getStartOffset() { - return this.startOffset; - } - - public int getEndOffset() { - return this.endOffset; - } - - public int getPosition() { - return this.position; - } - - public int getPositionLength() { - return this.positionLength; - } - - public String getType() { - return this.type; - } - - public Map getAttributes(){ - return this.attributes; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(Fields.TOKEN, term); - builder.field(Fields.START_OFFSET, startOffset); - builder.field(Fields.END_OFFSET, endOffset); - builder.field(Fields.TYPE, type); - builder.field(Fields.POSITION, position); - if (positionLength > 1) { - builder.field(Fields.POSITION_LENGTH, 
positionLength); - } - if (attributes != null && !attributes.isEmpty()) { - Map sortedAttributes = new TreeMap<>(attributes); - for (Map.Entry entity : sortedAttributes.entrySet()) { - builder.field(entity.getKey(), entity.getValue()); - } - } - builder.endObject(); - return builder; - } - - public static AnalyzeToken fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); - String field = null; - String term = ""; - int position = -1; - int startOffset = -1; - int endOffset = -1; - int positionLength = 1; - String type = ""; - Map attributes = new HashMap<>(); - for (XContentParser.Token t = parser.nextToken(); t != XContentParser.Token.END_OBJECT; t = parser.nextToken()) { - if (t == XContentParser.Token.FIELD_NAME) { - field = parser.currentName(); - continue; - } - if (Fields.TOKEN.equals(field)) { - term = parser.text(); - } else if (Fields.POSITION.equals(field)) { - position = parser.intValue(); - } else if (Fields.START_OFFSET.equals(field)) { - startOffset = parser.intValue(); - } else if (Fields.END_OFFSET.equals(field)) { - endOffset = parser.intValue(); - } else if (Fields.POSITION_LENGTH.equals(field)) { - positionLength = parser.intValue(); - } else if (Fields.TYPE.equals(field)) { - type = parser.text(); - } else { - if (t == XContentParser.Token.VALUE_STRING) { - attributes.put(field, parser.text()); - } else if (t == XContentParser.Token.VALUE_NUMBER) { - attributes.put(field, parser.numberValue()); - } else if (t == XContentParser.Token.VALUE_BOOLEAN) { - attributes.put(field, parser.booleanValue()); - } else if (t == XContentParser.Token.START_OBJECT) { - attributes.put(field, parser.map()); - } else if (t == XContentParser.Token.START_ARRAY) { - attributes.put(field, parser.list()); - } - } - } - return new AnalyzeToken(term, position, startOffset, endOffset, positionLength, type, attributes); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(term); - out.writeInt(startOffset); - out.writeInt(endOffset); - out.writeVInt(position); - out.writeOptionalVInt(positionLength > 1 ? 
positionLength : null); - out.writeOptionalString(type); - out.writeMapWithConsistentOrder(attributes); - } - } - - private final DetailAnalyzeResponse detail; - private final List tokens; - - public AnalyzeResponse(List tokens, DetailAnalyzeResponse detail) { - this.tokens = tokens; - this.detail = detail; - } - - public AnalyzeResponse(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - if (size > 0) { - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(new AnalyzeToken(in)); - } - } - else { - tokens = null; - } - detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - public List getTokens() { - return this.tokens; - } - - public DetailAnalyzeResponse detail() { - return this.detail; - } - - @Override - public Iterator iterator() { - return tokens.iterator(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (tokens != null) { - builder.startArray(Fields.TOKENS); - for (AnalyzeToken token : tokens) { - token.toXContent(builder, params); - } - builder.endArray(); - } - - if (detail != null) { - builder.startObject(Fields.DETAIL); - detail.toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - return builder; - } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("analyze_response", - true, args -> new AnalyzeResponse((List) args[0], (DetailAnalyzeResponse) args[1])); - static { - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> AnalyzeToken.fromXContent(p), new ParseField(Fields.TOKENS)); - PARSER.declareObject(optionalConstructorArg(), DetailAnalyzeResponse.PARSER, new ParseField(Fields.DETAIL)); - } - - public static AnalyzeResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (tokens != null) { - out.writeVInt(tokens.size()); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } - out.writeOptionalWriteable(detail); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - AnalyzeResponse that = (AnalyzeResponse) o; - return Objects.equals(detail, that.detail) && - Objects.equals(tokens, that.tokens); - } - - @Override - public int hashCode() { - return Objects.hash(detail, tokens); - } - - @Override - public String toString() { - return Strings.toString(this, true, true); - } - - static final class Fields { - static final String TOKENS = "tokens"; - static final String TOKEN = "token"; - static final String START_OFFSET = "start_offset"; - static final String END_OFFSET = "end_offset"; - static final String TYPE = "type"; - static final String POSITION = "position"; - static final String POSITION_LENGTH = "positionLength"; - static final String DETAIL = "detail"; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java deleted file mode 100644 index 1e84d9e0a2e1a..0000000000000 --- 
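// The readFrom() override above (throwing UnsupportedOperationException) marks the
// Streamable -> Writeable migration: state is read in the AnalyzeResponse(StreamInput)
// constructor, and the transport action supplies that constructor reference instead of calling
// readFrom() on an empty instance. Sketch of the consuming side, matching the
// TransportAnalyzeAction change further down:
@Override
protected Writeable.Reader<AnalyzeAction.Response> getResponseReader() {
    return AnalyzeAction.Response::new;               // deserialization happens in Response(StreamInput)
}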
a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/DetailAnalyzeResponse.java +++ /dev/null @@ -1,400 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.analyze; - - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.lang.reflect.Array; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; - -public class DetailAnalyzeResponse implements Writeable, ToXContentFragment { - - private final boolean customAnalyzer; - private final AnalyzeTokenList analyzer; - private final CharFilteredText[] charfilters; - private final AnalyzeTokenList tokenizer; - private final AnalyzeTokenList[] tokenfilters; - - public DetailAnalyzeResponse(AnalyzeTokenList analyzer) { - this(false, analyzer, null, null, null); - } - - public DetailAnalyzeResponse(CharFilteredText[] charfilters, AnalyzeTokenList tokenizer, AnalyzeTokenList[] tokenfilters) { - this(true, null, charfilters, tokenizer, tokenfilters); - } - - public DetailAnalyzeResponse(boolean customAnalyzer, - AnalyzeTokenList analyzer, - CharFilteredText[] charfilters, - AnalyzeTokenList tokenizer, - AnalyzeTokenList[] tokenfilters) { - this.customAnalyzer = customAnalyzer; - this.analyzer = analyzer; - this.charfilters = charfilters; - this.tokenizer = tokenizer; - this.tokenfilters = tokenfilters; - } - - public DetailAnalyzeResponse(StreamInput in) throws IOException { - this.customAnalyzer = in.readBoolean(); - if (customAnalyzer) { - tokenizer = new AnalyzeTokenList(in); - int size = in.readVInt(); - if (size > 0) { - charfilters = new CharFilteredText[size]; - for (int i = 0; i < size; i++) { - charfilters[i] = new CharFilteredText(in); - } - } - else { - charfilters = null; - } - size = in.readVInt(); - if (size > 0) { - tokenfilters = new AnalyzeTokenList[size]; - for (int i = 0; i < size; i++) { - tokenfilters[i] = new AnalyzeTokenList(in); - } - } - else { - tokenfilters = null; - } - analyzer = null; - } else { - analyzer = new AnalyzeTokenList(in); - tokenfilters = null; - 
tokenizer = null; - charfilters = null; - } - } - - public AnalyzeTokenList analyzer() { - return this.analyzer; - } - - public CharFilteredText[] charfilters() { - return this.charfilters; - } - - public AnalyzeTokenList tokenizer() { - return tokenizer; - } - - public AnalyzeTokenList[] tokenfilters() { - return tokenfilters; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DetailAnalyzeResponse that = (DetailAnalyzeResponse) o; - return customAnalyzer == that.customAnalyzer && - Objects.equals(analyzer, that.analyzer) && - Arrays.equals(charfilters, that.charfilters) && - Objects.equals(tokenizer, that.tokenizer) && - Arrays.equals(tokenfilters, that.tokenfilters); - } - - @Override - public int hashCode() { - int result = Objects.hash(customAnalyzer, analyzer, tokenizer); - result = 31 * result + Arrays.hashCode(charfilters); - result = 31 * result + Arrays.hashCode(tokenfilters); - return result; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.CUSTOM_ANALYZER, customAnalyzer); - - if (analyzer != null) { - builder.startObject(Fields.ANALYZER); - analyzer.toXContentWithoutObject(builder, params); - builder.endObject(); - } - - if (charfilters != null) { - builder.startArray(Fields.CHARFILTERS); - for (CharFilteredText charfilter : charfilters) { - charfilter.toXContent(builder, params); - } - builder.endArray(); - } - - if (tokenizer != null) { - builder.startObject(Fields.TOKENIZER); - tokenizer.toXContentWithoutObject(builder, params); - builder.endObject(); - } - - if (tokenfilters != null) { - builder.startArray(Fields.TOKENFILTERS); - for (AnalyzeTokenList tokenfilter : tokenfilters) { - tokenfilter.toXContent(builder, params); - } - builder.endArray(); - } - return builder; - } - - @SuppressWarnings("unchecked") - private static T[] fromList(Class clazz, List list) { - if (list == null) { - return null; - } - return list.toArray((T[])Array.newInstance(clazz, 0)); - } - - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("detail", - true, args -> new DetailAnalyzeResponse((boolean) args[0], (AnalyzeTokenList) args[1], - fromList(CharFilteredText.class, (List)args[2]), - (AnalyzeTokenList) args[3], - fromList(AnalyzeTokenList.class, (List)args[4]))); - - static { - PARSER.declareBoolean(constructorArg(), new ParseField(Fields.CUSTOM_ANALYZER)); - PARSER.declareObject(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField(Fields.ANALYZER)); - PARSER.declareObjectArray(optionalConstructorArg(), CharFilteredText.PARSER, new ParseField(Fields.CHARFILTERS)); - PARSER.declareObject(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField(Fields.TOKENIZER)); - PARSER.declareObjectArray(optionalConstructorArg(), AnalyzeTokenList.PARSER, new ParseField(Fields.TOKENFILTERS)); - } - - public static DetailAnalyzeResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - static final class Fields { - static final String NAME = "name"; - static final String FILTERED_TEXT = "filtered_text"; - static final String CUSTOM_ANALYZER = "custom_analyzer"; - static final String ANALYZER = "analyzer"; - static final String CHARFILTERS = "charfilters"; - static final String TOKENIZER = "tokenizer"; - static final String TOKENFILTERS = "tokenfilters"; - } - - @Override - public void writeTo(StreamOutput out) throws 
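// fromList() above exists because ConstructingObjectParser collects repeated objects into a List
// while this class stores arrays; Array.newInstance supplies a correctly typed zero-length array
// for List.toArray(). A self-contained illustration of the same idiom:
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.List;

class FromListDemo {
    @SuppressWarnings("unchecked")
    static <T> T[] fromList(Class<T> clazz, List<? extends T> list) {
        return list == null ? null : list.toArray((T[]) Array.newInstance(clazz, 0));
    }

    public static void main(String[] args) {
        String[] array = fromList(String.class, Arrays.asList("a", "b"));
        System.out.println(array.length + " " + array.getClass().getComponentType()); // 2 class java.lang.String
    }
}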
IOException { - out.writeBoolean(customAnalyzer); - if (customAnalyzer) { - tokenizer.writeTo(out); - if (charfilters != null) { - out.writeVInt(charfilters.length); - for (CharFilteredText charfilter : charfilters) { - charfilter.writeTo(out); - } - } else { - out.writeVInt(0); - } - if (tokenfilters != null) { - out.writeVInt(tokenfilters.length); - for (AnalyzeTokenList tokenfilter : tokenfilters) { - tokenfilter.writeTo(out); - } - } else { - out.writeVInt(0); - } - } else { - analyzer.writeTo(out); - } - } - - public static class AnalyzeTokenList implements Writeable, ToXContentObject { - private final String name; - private final AnalyzeResponse.AnalyzeToken[] tokens; - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - AnalyzeTokenList that = (AnalyzeTokenList) o; - return Objects.equals(name, that.name) && - Arrays.equals(tokens, that.tokens); - } - - @Override - public int hashCode() { - int result = Objects.hash(name); - result = 31 * result + Arrays.hashCode(tokens); - return result; - } - - public AnalyzeTokenList(String name, AnalyzeResponse.AnalyzeToken[] tokens) { - this.name = name; - this.tokens = tokens; - } - - public AnalyzeTokenList(StreamInput in) throws IOException { - name = in.readString(); - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeResponse.AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = new AnalyzeResponse.AnalyzeToken(in); - } - } - else { - tokens = null; - } - } - - public String getName() { - return name; - } - - public AnalyzeResponse.AnalyzeToken[] getTokens() { - return tokens; - } - - XContentBuilder toXContentWithoutObject(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.NAME, this.name); - builder.startArray(AnalyzeResponse.Fields.TOKENS); - if (tokens != null) { - for (AnalyzeResponse.AnalyzeToken token : tokens) { - token.toXContent(builder, params); - } - } - builder.endArray(); - return builder; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - toXContentWithoutObject(builder, params); - builder.endObject(); - return builder; - } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("token_list", - true, args -> new AnalyzeTokenList((String) args[0], - fromList(AnalyzeResponse.AnalyzeToken.class, (List)args[1]))); - - static { - PARSER.declareString(constructorArg(), new ParseField(Fields.NAME)); - PARSER.declareObjectArray(constructorArg(), (p, c) -> AnalyzeResponse.AnalyzeToken.fromXContent(p), - new ParseField(AnalyzeResponse.Fields.TOKENS)); - } - - public static AnalyzeTokenList fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - if (tokens != null) { - out.writeVInt(tokens.length); - for (AnalyzeResponse.AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } - } - } - - public static class CharFilteredText implements Writeable, ToXContentObject { - private final String name; - private final String[] texts; - - public CharFilteredText(String name, String[] texts) { - this.name = name; - if (texts != null) { - this.texts = texts; - } else { - this.texts = Strings.EMPTY_ARRAY; - } - } - - public CharFilteredText(StreamInput in) throws IOException { - name = 
in.readString(); - texts = in.readStringArray(); - } - - public String getName() { - return name; - } - - public String[] getTexts() { - return texts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(Fields.NAME, name); - builder.array(Fields.FILTERED_TEXT, texts); - builder.endObject(); - return builder; - } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("char_filtered_text", - true, args -> new CharFilteredText((String) args[0], ((List) args[1]).toArray(new String[0]))); - - static { - PARSER.declareString(constructorArg(), new ParseField(Fields.NAME)); - PARSER.declareStringArray(constructorArg(), new ParseField(Fields.FILTERED_TEXT)); - } - - public static CharFilteredText fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeStringArray(texts); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - CharFilteredText that = (CharFilteredText) o; - return Objects.equals(name, that.name) && - Arrays.equals(texts, that.texts); - } - - @Override - public int hashCode() { - int result = Objects.hash(name); - result = 31 * result + Arrays.hashCode(texts); - return result; - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 55bd593742667..b6079cc9c6953 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -79,7 +79,7 @@ /** * Transport action used to execute analyze requests */ -public class TransportAnalyzeAction extends TransportSingleShardAction { +public class TransportAnalyzeAction extends TransportSingleShardAction { private final Settings settings; private final IndicesService indicesService; @@ -90,19 +90,19 @@ public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterS TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Environment environment) { super(AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - AnalyzeRequest::new, ThreadPool.Names.ANALYZE); + AnalyzeAction.Request::new, ThreadPool.Names.ANALYZE); this.settings = settings; this.indicesService = indicesService; this.environment = environment; } @Override - protected Writeable.Reader getResponseReader() { - return AnalyzeResponse::new; + protected Writeable.Reader getResponseReader() { + return AnalyzeAction.Response::new; } @Override - protected boolean resolveIndex(AnalyzeRequest request) { + protected boolean resolveIndex(AnalyzeAction.Request request) { return request.index() != null; } @@ -124,71 +124,92 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { } @Override - protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) { - try { - final IndexService indexService; - if (shardId != null) { - indexService = indicesService.indexServiceSafe(shardId.getIndex()); - } else { - 
indexService = null; - } - String field = null; - Analyzer analyzer = null; - if (request.field() != null) { - if (indexService == null) { - throw new IllegalArgumentException( - "No index provided, and trying to analyzer based on a specific field which requires the index parameter"); - } - MappedFieldType fieldType = indexService.mapperService().fullName(request.field()); - if (fieldType != null) { - if (fieldType.tokenized() || fieldType instanceof KeywordFieldMapper.KeywordFieldType) { - analyzer = fieldType.indexAnalyzer(); - } else { - throw new IllegalArgumentException("Can't process field [" + request.field() + - "], Analysis requests are only supported on tokenized fields"); - } - field = fieldType.name(); - } - } - if (field == null) { - /** - * TODO: _all is disabled by default and index.query.default_field can define multiple fields or patterns so we should - * probably makes the field name mandatory in analyze query. - **/ - if (indexService != null) { - field = indexService.getIndexSettings().getDefaultFields().get(0); - } + protected AnalyzeAction.Response shardOperation(AnalyzeAction.Request request, ShardId shardId) throws IOException { + final IndexService indexService = getIndexService(shardId); + final int maxTokenCount = indexService == null ? + IndexSettings.MAX_TOKEN_COUNT_SETTING.get(settings) : indexService.getIndexSettings().getMaxTokenCount(); + + return analyze(request, indicesService.getAnalysis(), environment, indexService, maxTokenCount); + } + + public static AnalyzeAction.Response analyze(AnalyzeAction.Request request, AnalysisRegistry analysisRegistry, + Environment environment, IndexService indexService, int maxTokenCount) throws IOException { + + IndexAnalyzers indexAnalyzers = indexService == null ? null : indexService.getIndexAnalyzers(); + + // First, we check to see if the request requires a custom analyzer. If so, then we + // need to build it and then close it after use. + try (Analyzer analyzer = buildCustomAnalyzer(request, analysisRegistry, indexAnalyzers, environment)) { + if (analyzer != null) { + return analyze(request, analyzer, maxTokenCount); } - final AnalysisRegistry analysisRegistry = indicesService.getAnalysis(); - final int maxTokenCount = indexService == null ? - IndexSettings.MAX_TOKEN_COUNT_SETTING.get(settings) : indexService.getIndexSettings().getMaxTokenCount(); - return analyze(request, field, analyzer, indexService != null ? 
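// One detail worth calling out in the try-with-resources above: buildCustomAnalyzer() returns null
// when the request names an existing analyzer rather than defining components inline, and
// try-with-resources simply skips close() for a null resource, so only request-scoped custom
// analyzers are ever closed. Tiny self-contained demonstration of that language rule:
public class NullResourceDemo {
    public static void main(String[] args) throws Exception {
        try (java.io.Closeable resource = null) {
            System.out.println("body still runs; close() is skipped because the resource is null");
        }
    }
}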
indexService.getIndexAnalyzers() : null, - analysisRegistry, environment, maxTokenCount); - } catch (IOException e) { - throw new ElasticsearchException("analysis failed", e); } + // Otherwise we use a built-in analyzer, which should not be closed + return analyze(request, getAnalyzer(request, analysisRegistry, indexService), maxTokenCount); + } + + private IndexService getIndexService(ShardId shardId) { + if (shardId != null) { + return indicesService.indexServiceSafe(shardId.getIndex()); + } + return null; } - public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, IndexAnalyzers indexAnalyzers, - AnalysisRegistry analysisRegistry, Environment environment, int maxTokenCount) throws IOException { - boolean closeAnalyzer = false; - if (analyzer == null && request.analyzer() != null) { - if (indexAnalyzers == null) { - analyzer = analysisRegistry.getAnalyzer(request.analyzer()); + private static Analyzer getAnalyzer(AnalyzeAction.Request request, AnalysisRegistry analysisRegistry, + IndexService indexService) throws IOException { + if (request.analyzer() != null) { + if (indexService == null) { + Analyzer analyzer = analysisRegistry.getAnalyzer(request.analyzer()); if (analyzer == null) { throw new IllegalArgumentException("failed to find global analyzer [" + request.analyzer() + "]"); } + return analyzer; } else { - analyzer = indexAnalyzers.get(request.analyzer()); + Analyzer analyzer = indexService.getIndexAnalyzers().get(request.analyzer()); if (analyzer == null) { throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]"); } + return analyzer; + } + } + if (request.normalizer() != null) { + // Get normalizer from indexAnalyzers + if (indexService == null) { + throw new IllegalArgumentException("analysis based on a normalizer requires an index"); + } + Analyzer analyzer = indexService.getIndexAnalyzers().getNormalizer(request.normalizer()); + if (analyzer == null) { + throw new IllegalArgumentException("failed to find normalizer under [" + request.normalizer() + "]"); } - } else if (request.tokenizer() != null) { + } + if (request.field() != null) { + if (indexService == null) { + throw new IllegalArgumentException("analysis based on a specific field requires an index"); + } + MappedFieldType fieldType = indexService.mapperService().fullName(request.field()); + if (fieldType != null) { + if (fieldType.tokenized() || fieldType instanceof KeywordFieldMapper.KeywordFieldType) { + return fieldType.indexAnalyzer(); + } else { + throw new IllegalArgumentException("Can't process field [" + request.field() + + "], Analysis requests are only supported on tokenized fields"); + } + } + } + if (indexService == null) { + return analysisRegistry.getAnalyzer("standard"); + } else { + return indexService.getIndexAnalyzers().getDefaultIndexAnalyzer(); + } + } + + private static Analyzer buildCustomAnalyzer(AnalyzeAction.Request request, AnalysisRegistry analysisRegistry, + IndexAnalyzers indexAnalyzers, Environment environment) throws IOException { + if (request.tokenizer() != null) { final IndexSettings indexSettings = indexAnalyzers == null ? 
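// Resolution order implemented by getAnalyzer() above: an explicitly named analyzer wins, then a
// normalizer, then the analyzer mapped to the requested field, then the index default (or the
// global "standard" analyzer when no index is involved). Two consequences, sketched against the
// new request class and assuming it keeps the old fluent setters:
static void analyzerResolutionExamples() {
    AnalyzeAction.Request noIndexNoAnalyzer = new AnalyzeAction.Request().text("Some Text");
    // -> falls through to analysisRegistry.getAnalyzer("standard")

    AnalyzeAction.Request normalizerWithoutIndex = new AnalyzeAction.Request().text("Some Text")
            .normalizer("lowercase_only");
    // -> rejected with "analysis based on a normalizer requires an index"
}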
null : indexAnalyzers.getIndexSettings(); Tuple tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers, - analysisRegistry, environment); + analysisRegistry, environment); List charFilterFactoryList = parseCharFilterFactories(request, indexSettings, analysisRegistry, environment, false); @@ -196,18 +217,11 @@ public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Anal List tokenFilterFactoryList = parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenizerFactory, charFilterFactoryList, false); - analyzer = new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), - charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]), - tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()])); - closeAnalyzer = true; - } else if (request.normalizer() != null) { - // Get normalizer from indexAnalyzers - analyzer = indexAnalyzers.getNormalizer(request.normalizer()); - if (analyzer == null) { - throw new IllegalArgumentException("failed to find normalizer under [" + request.normalizer() + "]"); - } + return new CustomAnalyzer(tokenizerFactory.v1(), tokenizerFactory.v2(), + charFilterFactoryList.toArray(new CharFilterFactory[0]), + tokenFilterFactoryList.toArray(new TokenFilterFactory[0])); } else if (((request.tokenFilters() != null && request.tokenFilters().size() > 0) - || (request.charFilters() != null && request.charFilters().size() > 0))) { + || (request.charFilters() != null && request.charFilters().size() > 0))) { final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings(); // custom normalizer = if normalizer == null but filter or char_filter is not null and tokenizer/analyzer is null // get charfilter and filter from request @@ -221,46 +235,29 @@ public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Anal parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment, new Tuple<>(keywordTokenizerName, keywordTokenizerFactory), charFilterFactoryList, true); - analyzer = new CustomAnalyzer("keyword_for_normalizer", - keywordTokenizerFactory, - charFilterFactoryList.toArray(new CharFilterFactory[charFilterFactoryList.size()]), - tokenFilterFactoryList.toArray(new TokenFilterFactory[tokenFilterFactoryList.size()])); - closeAnalyzer = true; - } else if (analyzer == null) { - if (indexAnalyzers == null) { - analyzer = analysisRegistry.getAnalyzer("standard"); - } else { - analyzer = indexAnalyzers.getDefaultIndexAnalyzer(); - } + return new CustomAnalyzer("keyword_for_normalizer", keywordTokenizerFactory, + charFilterFactoryList.toArray(new CharFilterFactory[0]), tokenFilterFactoryList.toArray(new TokenFilterFactory[0])); } - if (analyzer == null) { - throw new IllegalArgumentException("failed to find analyzer"); - } - - List tokens = null; - DetailAnalyzeResponse detail = null; + return null; + } + private static AnalyzeAction.Response analyze(AnalyzeAction.Request request, Analyzer analyzer, int maxTokenCount) { if (request.explain()) { - detail = detailAnalyze(request, analyzer, field, maxTokenCount); - } else { - tokens = simpleAnalyze(request, analyzer, field, maxTokenCount); + return new AnalyzeAction.Response(null, detailAnalyze(request, analyzer, maxTokenCount)); } - - if (closeAnalyzer) { - analyzer.close(); - } - - return new AnalyzeResponse(tokens, detail); + return new AnalyzeAction.Response(simpleAnalyze(request, analyzer, maxTokenCount), null); } - private static List 
simpleAnalyze(AnalyzeRequest request, - Analyzer analyzer, String field, int maxTokenCount) { + private static List simpleAnalyze(AnalyzeAction.Request request, + Analyzer analyzer, int maxTokenCount) { TokenCounter tc = new TokenCounter(maxTokenCount); - List tokens = new ArrayList<>(); + List tokens = new ArrayList<>(); int lastPosition = -1; int lastOffset = 0; + // Note that we always pass "" as the field to the various Analyzer methods, because + // the analyzers we use here are all field-specific and so ignore this parameter for (String text : request.text()) { - try (TokenStream stream = analyzer.tokenStream(field, text)) { + try (TokenStream stream = analyzer.tokenStream("", text)) { stream.reset(); CharTermAttribute term = stream.addAttribute(CharTermAttribute.class); PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class); @@ -273,7 +270,7 @@ private static List simpleAnalyze(AnalyzeRequest r if (increment > 0) { lastPosition = lastPosition + increment; } - tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), + tokens.add(new AnalyzeAction.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), lastOffset + offset.endOffset(), posLen.getPositionLength(), type.type(), null)); tc.increment(); } @@ -281,8 +278,8 @@ private static List simpleAnalyze(AnalyzeRequest r lastOffset += offset.endOffset(); lastPosition += posIncr.getPositionIncrement(); - lastPosition += analyzer.getPositionIncrementGap(field); - lastOffset += analyzer.getOffsetGap(field); + lastPosition += analyzer.getPositionIncrementGap(""); + lastOffset += analyzer.getOffsetGap(""); } catch (IOException e) { throw new ElasticsearchException("failed to analyze", e); } @@ -290,8 +287,9 @@ private static List simpleAnalyze(AnalyzeRequest r return tokens; } - private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analyzer analyzer, String field, int maxTokenCount) { - DetailAnalyzeResponse detailResponse; + private static AnalyzeAction.DetailAnalyzeResponse detailAnalyze(AnalyzeAction.Request request, Analyzer analyzer, + int maxTokenCount) { + AnalyzeAction.DetailAnalyzeResponse detailResponse; final Set includeAttributes = new HashSet<>(); if (request.attributes() != null) { for (String attribute : request.attributes()) { @@ -336,7 +334,7 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy // analyzing only tokenizer Tokenizer tokenizer = tokenizerFactory.create(); tokenizer.setReader(reader); - tokenizerTokenListCreator.analyze(tokenizer, customAnalyzer, field, includeAttributes); + tokenizerTokenListCreator.analyze(tokenizer, customAnalyzer, includeAttributes); // analyzing each tokenfilter if (tokenFilterFactories != null) { @@ -346,30 +344,30 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy } TokenStream stream = createStackedTokenStream(request.text()[textIndex], charFilterFactories, tokenizerFactory, tokenFilterFactories, tokenFilterIndex + 1); - tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, customAnalyzer, field, includeAttributes); + tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, customAnalyzer, includeAttributes); } } } - DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = - new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length]; + AnalyzeAction.CharFilteredText[] charFilteredLists = + new AnalyzeAction.CharFilteredText[charFiltersTexts.length]; if 
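// The loop in simpleAnalyze() above follows the standard Lucene TokenStream contract: reset(),
// incrementToken() until exhausted, end(), close(), reading per-token state through attribute
// views. A self-contained sketch of that contract using plain Lucene (not Elasticsearch code):
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

public class TokenStreamContractDemo {
    public static void main(String[] args) throws Exception {
        try (Analyzer analyzer = new StandardAnalyzer();
             TokenStream stream = analyzer.tokenStream("", "The Quick Brown Fox")) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
            PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
            stream.reset();                                   // required before the first incrementToken()
            int position = -1;
            while (stream.incrementToken()) {
                position += posIncr.getPositionIncrement();   // same position bookkeeping as above
                System.out.println(term.toString() + " [" + offset.startOffset() + ","
                        + offset.endOffset() + "] pos=" + position);
            }
            stream.end();                                     // records end-of-stream offsets
        }
    }
}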
(charFilterFactories != null) { for (int charFilterIndex = 0; charFilterIndex < charFiltersTexts.length; charFilterIndex++) { - charFilteredLists[charFilterIndex] = new DetailAnalyzeResponse.CharFilteredText( + charFilteredLists[charFilterIndex] = new AnalyzeAction.CharFilteredText( charFilterFactories[charFilterIndex].name(), charFiltersTexts[charFilterIndex]); } } - DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = - new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length]; + AnalyzeAction.AnalyzeTokenList[] tokenFilterLists = + new AnalyzeAction.AnalyzeTokenList[tokenFiltersTokenListCreator.length]; if (tokenFilterFactories != null) { for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFiltersTokenListCreator.length; tokenFilterIndex++) { - tokenFilterLists[tokenFilterIndex] = new DetailAnalyzeResponse.AnalyzeTokenList( + tokenFilterLists[tokenFilterIndex] = new AnalyzeAction.AnalyzeTokenList( tokenFilterFactories[tokenFilterIndex].name(), tokenFiltersTokenListCreator[tokenFilterIndex].getArrayTokens()); } } - detailResponse = new DetailAnalyzeResponse(charFilteredLists, new DetailAnalyzeResponse.AnalyzeTokenList( + detailResponse = new AnalyzeAction.DetailAnalyzeResponse(charFilteredLists, new AnalyzeAction.AnalyzeTokenList( customAnalyzer.getTokenizerName(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists); } else { String name; @@ -381,10 +379,11 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy TokenListCreator tokenListCreator = new TokenListCreator(maxTokenCount); for (String text : request.text()) { - tokenListCreator.analyze(analyzer.tokenStream(field, text), analyzer, field, - includeAttributes); + tokenListCreator.analyze(analyzer.tokenStream("", text), analyzer, + includeAttributes); } - detailResponse = new DetailAnalyzeResponse(new DetailAnalyzeResponse.AnalyzeTokenList(name, tokenListCreator.getArrayTokens())); + detailResponse + = new AnalyzeAction.DetailAnalyzeResponse(new AnalyzeAction.AnalyzeTokenList(name, tokenListCreator.getArrayTokens())); } return detailResponse; } @@ -443,7 +442,7 @@ private void increment(){ private static class TokenListCreator { int lastPosition = -1; int lastOffset = 0; - List tokens; + List tokens; private TokenCounter tc; TokenListCreator(int maxTokenCount) { @@ -451,7 +450,7 @@ private static class TokenListCreator { tc = new TokenCounter(maxTokenCount); } - private void analyze(TokenStream stream, Analyzer analyzer, String field, Set includeAttributes) { + private void analyze(TokenStream stream, Analyzer analyzer, Set includeAttributes) { try { stream.reset(); CharTermAttribute term = stream.addAttribute(CharTermAttribute.class); @@ -465,7 +464,7 @@ private void analyze(TokenStream stream, Analyzer analyzer, String field, Set 0) { lastPosition = lastPosition + increment; } - tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), + tokens.add(new AnalyzeAction.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), lastOffset + offset.endOffset(), posLen.getPositionLength(), type.type(), extractExtendedAttributes(stream, includeAttributes))); tc.increment(); @@ -474,8 +473,8 @@ private void analyze(TokenStream stream, Analyzer analyzer, String field, Set extractExtendedAttributes(TokenStream stream, return extendedAttributes; } - private static List parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, + private static List 
parseCharFilterFactories(AnalyzeAction.Request request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, Environment environment, boolean normalizer) throws IOException { List charFilterFactoryList = new ArrayList<>(); if (request.charFilters() != null && request.charFilters().size() > 0) { - List charFilters = request.charFilters(); - for (AnalyzeRequest.NameOrDefinition charFilter : charFilters) { + List charFilters = request.charFilters(); + for (AnalyzeAction.Request.NameOrDefinition charFilter : charFilters) { CharFilterFactory charFilterFactory; // parse anonymous settings if (charFilter.definition != null) { @@ -619,7 +618,7 @@ public TokenFilterFactory apply(String s) { } } - private static List parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, + private static List parseTokenFilterFactories(AnalyzeAction.Request request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, Environment environment, Tuple tokenizerFactory, List charFilterFactoryList, @@ -627,8 +626,8 @@ private static List parseTokenFilterFactories(AnalyzeRequest List tokenFilterFactoryList = new ArrayList<>(); DeferredTokenFilterRegistry deferredRegistry = new DeferredTokenFilterRegistry(analysisRegistry, indexSettings); if (request.tokenFilters() != null && request.tokenFilters().size() > 0) { - List tokenFilters = request.tokenFilters(); - for (AnalyzeRequest.NameOrDefinition tokenFilter : tokenFilters) { + List tokenFilters = request.tokenFilters(); + for (AnalyzeAction.Request.NameOrDefinition tokenFilter : tokenFilters) { TokenFilterFactory tokenFilterFactory; // parse anonymous settings if (tokenFilter.definition != null) { @@ -683,11 +682,12 @@ private static List parseTokenFilterFactories(AnalyzeRequest return tokenFilterFactoryList; } - private static Tuple parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalzyers, - AnalysisRegistry analysisRegistry, Environment environment) throws IOException { + private static Tuple parseTokenizerFactory(AnalyzeAction.Request request, IndexAnalyzers indexAnalzyers, + AnalysisRegistry analysisRegistry, + Environment environment) throws IOException { String name; TokenizerFactory tokenizerFactory; - final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer(); + final AnalyzeAction.Request.NameOrDefinition tokenizer = request.tokenizer(); // parse anonymous settings if (tokenizer.definition != null) { Settings settings = getAnonymousSettings(tokenizer.definition); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index eae849407666e..6a229667745d3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -165,14 +165,22 @@ protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener analyze(AnalyzeRequest request); + ActionFuture analyze(AnalyzeAction.Request request); /** * Analyze text under the provided index. */ - void analyze(AnalyzeRequest request, ActionListener listener); + void analyze(AnalyzeAction.Request request, ActionListener listener); /** * Analyze text under the provided index. 
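// With the request and response types folded into AnalyzeAction, callers keep using the same
// client entry point, just with the nested classes. A hedged usage sketch (index name and text are
// examples; Response#getTokens() is assumed to mirror the old AnalyzeResponse getter):
static void analyzeViaClient(Client client) {
    AnalyzeAction.Request request = new AnalyzeAction.Request("my-index")
            .text("The quick brown fox")
            .analyzer("standard");

    client.admin().indices().analyze(request, new ActionListener<AnalyzeAction.Response>() {
        @Override
        public void onResponse(AnalyzeAction.Response response) {
            response.getTokens().forEach(token -> System.out.println(token.getTerm()));
        }

        @Override
        public void onFailure(Exception e) {
            e.printStackTrace();
        }
    });
}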
diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index e79f0567babe6..5c4c7ad44c6d2 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -142,9 +142,7 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; @@ -1596,12 +1594,12 @@ public UpdateSettingsRequestBuilder prepareUpdateSettings(String... indices) { } @Override - public ActionFuture analyze(final AnalyzeRequest request) { + public ActionFuture analyze(final AnalyzeAction.Request request) { return execute(AnalyzeAction.INSTANCE, request); } @Override - public void analyze(final AnalyzeRequest request, final ActionListener listener) { + public void analyze(final AnalyzeAction.Request request, final ActionListener listener) { execute(AnalyzeAction.INSTANCE, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 1c84e8ce9fc6a..93832e08cd048 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -20,7 +20,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; @@ -32,7 +31,6 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,8 +50,6 @@ public class IndexTemplateMetaData extends AbstractDiffable { - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(IndexTemplateMetaData.class)); - private final String name; private final int order; @@ -97,7 +93,7 @@ public IndexTemplateMetaData(String name, int order, Integer version, this.name = name; this.order = order; this.version = version; - this.patterns= patterns; + this.patterns = patterns; this.settings = settings; this.mappings = mappings; this.aliases = aliases; @@ -227,7 +223,7 @@ public void writeTo(StreamOutput out) throws IOException { public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet( - "template", "order", "mappings", "settings", "index_patterns", "aliases", "version"); + 
"order", "mappings", "settings", "index_patterns", "aliases", "version"); private String name; @@ -487,11 +483,7 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser, String t builder.patterns(index_patterns); } } else if (token.isValue()) { - // Prior to 5.1.0, elasticsearch only supported a single index pattern called `template` (#21009) - if("template".equals(currentFieldName)) { - deprecationLogger.deprecated("Deprecated field [template] used, replaced by [index_patterns]"); - builder.patterns(Collections.singletonList(parser.text())); - } else if ("order".equals(currentFieldName)) { + if ("order".equals(currentFieldName)) { builder.order(parser.intValue()); } else if ("version".equals(currentFieldName)) { builder.version(parser.intValue()); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 96a0cafc35b11..2e004dfad2e55 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.lucene.search; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -148,8 +147,6 @@ public static Query applyMinimumShouldMatch(BooleanQuery query, @Nullable String public static Query maybeApplyMinimumShouldMatch(Query query, @Nullable String minimumShouldMatch) { if (query instanceof BooleanQuery) { return applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); - } else if (query instanceof ExtendedCommonTermsQuery) { - ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch); } return query; } diff --git a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java deleted file mode 100644 index 5b2853ac359c2..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.Query; -import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.MappedFieldType; - -import java.io.IOException; -import java.util.Objects; - -/** - * CommonTermsQuery query is a query that executes high-frequency terms in a - * optional sub-query to prevent slow queries due to "common" terms like - * stopwords. This query basically builds 2 queries off the {@code #add(Term) - * added} terms where low-frequency terms are added to a required boolean clause - * and high-frequency terms are added to an optional boolean clause. The - * optional clause is only executed if the required "low-frequency' clause - * matches. - * - * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery - * will achieve the same result without any configuration. - */ -@Deprecated -public class CommonTermsQueryBuilder extends AbstractQueryBuilder { - - public static final String COMMON_TERMS_QUERY_DEPRECATION_MSG = "[match] query which can efficiently " + - "skip blocks of documents if the total number of hits is not tracked"; - - public static final String NAME = "common"; - - public static final float DEFAULT_CUTOFF_FREQ = 0.01f; - public static final Operator DEFAULT_HIGH_FREQ_OCCUR = Operator.OR; - public static final Operator DEFAULT_LOW_FREQ_OCCUR = Operator.OR; - - private static final ParseField CUTOFF_FREQUENCY_FIELD = new ParseField("cutoff_frequency"); - private static final ParseField MINIMUM_SHOULD_MATCH_FIELD = new ParseField("minimum_should_match"); - private static final ParseField LOW_FREQ_OPERATOR_FIELD = new ParseField("low_freq_operator"); - private static final ParseField HIGH_FREQ_OPERATOR_FIELD = new ParseField("high_freq_operator"); - private static final ParseField DISABLE_COORD_FIELD = new ParseField("disable_coord") - .withAllDeprecated("disable_coord has been removed"); - private static final ParseField ANALYZER_FIELD = new ParseField("analyzer"); - private static final ParseField QUERY_FIELD = new ParseField("query"); - private static final ParseField HIGH_FREQ_FIELD = new ParseField("high_freq"); - private static final ParseField LOW_FREQ_FIELD = new ParseField("low_freq"); - - private final String fieldName; - - private final Object text; - - private Operator highFreqOperator = DEFAULT_HIGH_FREQ_OCCUR; - - private Operator lowFreqOperator = DEFAULT_LOW_FREQ_OCCUR; - - private String analyzer = null; - - private String lowFreqMinimumShouldMatch = null; - - private String highFreqMinimumShouldMatch = null; - - private float cutoffFrequency = DEFAULT_CUTOFF_FREQ; - - /** - * Constructs a new common terms query. - * @deprecated See {@link CommonTermsQueryBuilder} for more details. 
- */ - @Deprecated - public CommonTermsQueryBuilder(String fieldName, Object text) { - if (Strings.isEmpty(fieldName)) { - throw new IllegalArgumentException("field name is null or empty"); - } - if (text == null) { - throw new IllegalArgumentException("text cannot be null"); - } - this.fieldName = fieldName; - this.text = text; - } - - /** - * Read from a stream. - * @deprecated See {@link CommonTermsQueryBuilder} for more details. - */ - @Deprecated - public CommonTermsQueryBuilder(StreamInput in) throws IOException { - super(in); - fieldName = in.readString(); - text = in.readGenericValue(); - highFreqOperator = Operator.readFromStream(in); - lowFreqOperator = Operator.readFromStream(in); - analyzer = in.readOptionalString(); - lowFreqMinimumShouldMatch = in.readOptionalString(); - highFreqMinimumShouldMatch = in.readOptionalString(); - cutoffFrequency = in.readFloat(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(this.fieldName); - out.writeGenericValue(this.text); - highFreqOperator.writeTo(out); - lowFreqOperator.writeTo(out); - out.writeOptionalString(analyzer); - out.writeOptionalString(lowFreqMinimumShouldMatch); - out.writeOptionalString(highFreqMinimumShouldMatch); - out.writeFloat(cutoffFrequency); - } - - public String fieldName() { - return this.fieldName; - } - - public Object value() { - return this.text; - } - - /** - * Sets the operator to use for terms with a high document frequency - * (greater than or equal to {@link #cutoffFrequency(float)}. Defaults to - * {@code AND}. - */ - public CommonTermsQueryBuilder highFreqOperator(Operator operator) { - this.highFreqOperator = (operator == null) ? DEFAULT_HIGH_FREQ_OCCUR : operator; - return this; - } - - public Operator highFreqOperator() { - return highFreqOperator; - } - - /** - * Sets the operator to use for terms with a low document frequency (less - * than {@link #cutoffFrequency(float)}. Defaults to {@code AND}. - */ - public CommonTermsQueryBuilder lowFreqOperator(Operator operator) { - this.lowFreqOperator = (operator == null) ? DEFAULT_LOW_FREQ_OCCUR : operator; - return this; - } - - public Operator lowFreqOperator() { - return lowFreqOperator; - } - - /** - * Explicitly set the analyzer to use. Defaults to use explicit mapping - * config for the field, or, if not set, the default search analyzer. - */ - public CommonTermsQueryBuilder analyzer(String analyzer) { - this.analyzer = analyzer; - return this; - } - - public String analyzer() { - return this.analyzer; - } - - /** - * Sets the cutoff document frequency for high / low frequent terms. A value - * in [0..1] (or absolute number >=1) representing the maximum threshold of - * a terms document frequency to be considered a low frequency term. - * Defaults to - * {@code {@value #DEFAULT_CUTOFF_FREQ}} - */ - public CommonTermsQueryBuilder cutoffFrequency(float cutoffFrequency) { - this.cutoffFrequency = cutoffFrequency; - return this; - } - - public float cutoffFrequency() { - return this.cutoffFrequency; - } - - /** - * Sets the minimum number of high frequent query terms that need to match in order to - * produce a hit when there are no low frequent terms. 
- */ - public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) { - this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch; - return this; - } - - public String highFreqMinimumShouldMatch() { - return this.highFreqMinimumShouldMatch; - } - - /** - * Sets the minimum number of low frequent query terms that need to match in order to - * produce a hit. - */ - public CommonTermsQueryBuilder lowFreqMinimumShouldMatch(String lowFreqMinimumShouldMatch) { - this.lowFreqMinimumShouldMatch = lowFreqMinimumShouldMatch; - return this; - } - - public String lowFreqMinimumShouldMatch() { - return this.lowFreqMinimumShouldMatch; - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.startObject(fieldName); - builder.field(QUERY_FIELD.getPreferredName(), text); - builder.field(HIGH_FREQ_OPERATOR_FIELD.getPreferredName(), highFreqOperator.toString()); - builder.field(LOW_FREQ_OPERATOR_FIELD.getPreferredName(), lowFreqOperator.toString()); - if (analyzer != null) { - builder.field(ANALYZER_FIELD.getPreferredName(), analyzer); - } - builder.field(CUTOFF_FREQUENCY_FIELD.getPreferredName(), cutoffFrequency); - if (lowFreqMinimumShouldMatch != null || highFreqMinimumShouldMatch != null) { - builder.startObject(MINIMUM_SHOULD_MATCH_FIELD.getPreferredName()); - if (lowFreqMinimumShouldMatch != null) { - builder.field(LOW_FREQ_FIELD.getPreferredName(), lowFreqMinimumShouldMatch); - } - if (highFreqMinimumShouldMatch != null) { - builder.field(HIGH_FREQ_FIELD.getPreferredName(), highFreqMinimumShouldMatch); - } - builder.endObject(); - } - printBoostAndQueryName(builder); - builder.endObject(); - builder.endObject(); - } - - public static CommonTermsQueryBuilder fromXContent(XContentParser parser) throws IOException { - String fieldName = null; - Object text = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - String analyzer = null; - String lowFreqMinimumShouldMatch = null; - String highFreqMinimumShouldMatch = null; - Operator highFreqOperator = CommonTermsQueryBuilder.DEFAULT_HIGH_FREQ_OCCUR; - Operator lowFreqOperator = CommonTermsQueryBuilder.DEFAULT_LOW_FREQ_OCCUR; - float cutoffFrequency = CommonTermsQueryBuilder.DEFAULT_CUTOFF_FREQ; - String queryName = null; - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName); - fieldName = currentFieldName; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - String innerFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - innerFieldName = parser.currentName(); - } else if (token.isValue()) { - if (LOW_FREQ_FIELD.match(innerFieldName, parser.getDeprecationHandler())) { - lowFreqMinimumShouldMatch = parser.text(); - } else if (HIGH_FREQ_FIELD.match(innerFieldName, parser.getDeprecationHandler())) { - highFreqMinimumShouldMatch = 
parser.text(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] query does not support [" + innerFieldName - + "] for [" + currentFieldName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] unexpected token type [" + token - + "] after [" + innerFieldName + "]"); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] query does not support [" + currentFieldName + "]"); - } - } else if (token.isValue()) { - if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - text = parser.objectText(); - } else if (ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - analyzer = parser.text(); - } else if (DISABLE_COORD_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - // ignore - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - boost = parser.floatValue(); - } else if (HIGH_FREQ_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - highFreqOperator = Operator.fromString(parser.text()); - } else if (LOW_FREQ_OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - lowFreqOperator = Operator.fromString(parser.text()); - } else if (MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - lowFreqMinimumShouldMatch = parser.text(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - cutoffFrequency = parser.floatValue(); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - queryName = parser.text(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + CommonTermsQueryBuilder.NAME + - "] query does not support [" + currentFieldName + "]"); - } - } - } - } else { - throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName()); - fieldName = parser.currentName(); - text = parser.objectText(); - } - } - - return new CommonTermsQueryBuilder(fieldName, text) - .lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch) - .highFreqMinimumShouldMatch(highFreqMinimumShouldMatch) - .analyzer(analyzer) - .highFreqOperator(highFreqOperator) - .lowFreqOperator(lowFreqOperator) - .cutoffFrequency(cutoffFrequency) - .boost(boost) - .queryName(queryName); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - String field; - MappedFieldType fieldType = context.fieldMapper(fieldName); - if (fieldType != null) { - field = fieldType.name(); - } else { - field = fieldName; - } - - Analyzer analyzerObj; - if (analyzer == null) { - if (fieldType != null) { - analyzerObj = context.getSearchAnalyzer(fieldType); - } else { - analyzerObj = context.getMapperService().searchAnalyzer(); - } - } else { - analyzerObj = context.getMapperService().getIndexAnalyzers().get(analyzer); - if (analyzerObj == null) { - throw new QueryShardException(context, "[common] analyzer [" + analyzer + "] not found"); - } - } - - Occur highFreqOccur = highFreqOperator.toBooleanClauseOccur(); - Occur lowFreqOccur = lowFreqOperator.toBooleanClauseOccur(); - - ExtendedCommonTermsQuery commonsQuery = new ExtendedCommonTermsQuery(highFreqOccur, lowFreqOccur, cutoffFrequency); - return parseQueryString(commonsQuery, text, field, 
analyzerObj, lowFreqMinimumShouldMatch, highFreqMinimumShouldMatch); - } - - private static Query parseQueryString(ExtendedCommonTermsQuery query, Object queryString, String field, Analyzer analyzer, - String lowFreqMinimumShouldMatch, String highFreqMinimumShouldMatch) throws IOException { - // Logic similar to QueryParser#getFieldQuery - try (TokenStream source = analyzer.tokenStream(field, queryString.toString())) { - source.reset(); - CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class); - BytesRefBuilder builder = new BytesRefBuilder(); - while (source.incrementToken()) { - // UTF-8 - builder.copyChars(termAtt); - query.add(new Term(field, builder.toBytesRef())); - } - } - - query.setLowFreqMinimumNumberShouldMatch(lowFreqMinimumShouldMatch); - query.setHighFreqMinimumNumberShouldMatch(highFreqMinimumShouldMatch); - return query; - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldName, text, highFreqOperator, lowFreqOperator, analyzer, - lowFreqMinimumShouldMatch, highFreqMinimumShouldMatch, cutoffFrequency); - } - - @Override - protected boolean doEquals(CommonTermsQueryBuilder other) { - return Objects.equals(fieldName, other.fieldName) && - Objects.equals(text, other.text) && - Objects.equals(highFreqOperator, other.highFreqOperator) && - Objects.equals(lowFreqOperator, other.lowFreqOperator) && - Objects.equals(analyzer, other.analyzer) && - Objects.equals(lowFreqMinimumShouldMatch, other.lowFreqMinimumShouldMatch) && - Objects.equals(highFreqMinimumShouldMatch, other.highFreqMinimumShouldMatch) && - Objects.equals(cutoffFrequency, other.cutoffFrequency); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 92b4fa664193c..5e1047684840f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -96,7 +96,7 @@ protected IntervalsSource analyzeText(CachingTokenFilter stream, int maxGaps, bo // formulate a single term, boolean, or phrase. 
if (numTokens == 0) { - return null; + return NO_INTERVALS; } else if (numTokens == 1) { // single term return analyzeTerm(stream); @@ -231,7 +231,7 @@ protected List analyzeGraph(TokenStream source) throws IOExcept return clauses; } - private static final IntervalsSource NO_INTERVALS = new IntervalsSource() { + static final IntervalsSource NO_INTERVALS = new IntervalsSource() { @Override public IntervalIterator intervals(String field, LeafReaderContext ctx) { diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index fa93550759324..8d5b6e71927e2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -43,17 +44,7 @@ */ public class MatchQueryBuilder extends AbstractQueryBuilder { - private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + - "the [match] query can skip block of documents efficiently if the total number of hits is not tracked"; - public static final ParseField ZERO_TERMS_QUERY_FIELD = new ParseField("zero_terms_query"); - /** - * @deprecated Since max_optimization optimization landed in 7.0, normal MatchQuery - * will achieve the same result without any configuration. - */ - @Deprecated - public static final ParseField CUTOFF_FREQUENCY_FIELD = - new ParseField("cutoff_frequency").withAllDeprecated(CUTOFF_FREQUENCY_DEPRECATION_MSG); public static final ParseField LENIENT_FIELD = new ParseField("lenient"); public static final ParseField FUZZY_TRANSPOSITIONS_FIELD = new ParseField("fuzzy_transpositions"); public static final ParseField FUZZY_REWRITE_FIELD = new ParseField("fuzzy_rewrite"); @@ -95,8 +86,6 @@ public class MatchQueryBuilder extends AbstractQueryBuilder { private MatchQuery.ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY; - private Float cutoffFrequency = null; - private boolean autoGenerateSynonymsPhraseQuery = true; /** @@ -131,7 +120,10 @@ public MatchQueryBuilder(StreamInput in) throws IOException { minimumShouldMatch = in.readOptionalString(); fuzzyRewrite = in.readOptionalString(); fuzziness = in.readOptionalWriteable(Fuzziness::new); - cutoffFrequency = in.readOptionalFloat(); + // cutoff_frequency has been removed + if (in.getVersion().before(Version.V_8_0_0)) { + in.readOptionalFloat(); + } autoGenerateSynonymsPhraseQuery = in.readBoolean(); } @@ -150,7 +142,10 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(minimumShouldMatch); out.writeOptionalString(fuzzyRewrite); out.writeOptionalWriteable(fuzziness); - out.writeOptionalFloat(cutoffFrequency); + // cutoff_frequency has been removed + if (out.getVersion().before(Version.V_8_0_0)) { + out.writeOptionalFloat(null); + } out.writeBoolean(autoGenerateSynonymsPhraseQuery); } @@ -241,24 +236,6 @@ public int maxExpansions() { return this.maxExpansions; } - /** - * Set a cutoff value in [0..1] (or absolute number >=1) representing the - * maximum threshold of a terms document frequency to be considered a low - * frequency term. 
- * - * @deprecated see {@link MatchQueryBuilder#CUTOFF_FREQUENCY_FIELD} for more details - */ - @Deprecated - public MatchQueryBuilder cutoffFrequency(float cutoff) { - this.cutoffFrequency = cutoff; - return this; - } - - /** Gets the optional cutoff value, can be {@code null} if not set previously */ - public Float cutoffFrequency() { - return this.cutoffFrequency; - } - /** Sets optional minimumShouldMatch value to apply to the query */ public MatchQueryBuilder minimumShouldMatch(String minimumShouldMatch) { this.minimumShouldMatch = minimumShouldMatch; @@ -375,9 +352,6 @@ public void doXContent(XContentBuilder builder, Params params) throws IOExceptio builder.field(FUZZY_TRANSPOSITIONS_FIELD.getPreferredName(), fuzzyTranspositions); builder.field(LENIENT_FIELD.getPreferredName(), lenient); builder.field(ZERO_TERMS_QUERY_FIELD.getPreferredName(), zeroTermsQuery.toString()); - if (cutoffFrequency != null) { - builder.field(CUTOFF_FREQUENCY_FIELD.getPreferredName(), cutoffFrequency); - } builder.field(GENERATE_SYNONYMS_PHRASE_QUERY.getPreferredName(), autoGenerateSynonymsPhraseQuery); printBoostAndQueryName(builder); builder.endObject(); @@ -402,7 +376,6 @@ protected Query doToQuery(QueryShardContext context) throws IOException { matchQuery.setTranspositions(fuzzyTranspositions); matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null, LoggingDeprecationHandler.INSTANCE)); matchQuery.setLenient(lenient); - matchQuery.setCommonTermsCutoff(cutoffFrequency); matchQuery.setZeroTermsQuery(zeroTermsQuery); matchQuery.setAutoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); @@ -424,7 +397,6 @@ protected boolean doEquals(MatchQueryBuilder other) { Objects.equals(lenient, other.lenient) && Objects.equals(fuzzyTranspositions, other.fuzzyTranspositions) && Objects.equals(zeroTermsQuery, other.zeroTermsQuery) && - Objects.equals(cutoffFrequency, other.cutoffFrequency) && Objects.equals(autoGenerateSynonymsPhraseQuery, other.autoGenerateSynonymsPhraseQuery); } @@ -432,7 +404,7 @@ protected boolean doEquals(MatchQueryBuilder other) { protected int doHashCode() { return Objects.hash(fieldName, value, operator, analyzer, fuzziness, prefixLength, maxExpansions, minimumShouldMatch, - fuzzyRewrite, lenient, fuzzyTranspositions, zeroTermsQuery, cutoffFrequency, autoGenerateSynonymsPhraseQuery); + fuzzyRewrite, lenient, fuzzyTranspositions, zeroTermsQuery, autoGenerateSynonymsPhraseQuery); } @Override @@ -453,7 +425,6 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; String fuzzyRewrite = null; boolean lenient = MatchQuery.DEFAULT_LENIENCY; - Float cutOffFrequency = null; ZeroTermsQuery zeroTermsQuery = MatchQuery.DEFAULT_ZERO_TERMS_QUERY; boolean autoGenerateSynonymsPhraseQuery = true; String queryName = null; @@ -491,8 +462,6 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc fuzzyTranspositions = parser.booleanValue(); } else if (LENIENT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { lenient = parser.booleanValue(); - } else if (CUTOFF_FREQUENCY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - cutOffFrequency = parser.floatValue(); } else if (ZERO_TERMS_QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { String zeroTermsValue = parser.text(); if ("none".equalsIgnoreCase(zeroTermsValue)) { @@ -539,14 +508,10 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws 
IOExc matchQuery.fuzzyTranspositions(fuzzyTranspositions); matchQuery.maxExpansions(maxExpansion); matchQuery.lenient(lenient); - if (cutOffFrequency != null) { - matchQuery.cutoffFrequency(cutOffFrequency); - } matchQuery.zeroTermsQuery(zeroTermsQuery); matchQuery.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); matchQuery.queryName(queryName); matchQuery.boost(boost); return matchQuery; } - } diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index fb400a9d3fc75..07f7ae4b79398 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -51,9 +52,6 @@ */ public class MultiMatchQueryBuilder extends AbstractQueryBuilder { - private static final String CUTOFF_FREQUENCY_DEPRECATION_MSG = "you can omit this option, " + - "the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked"; - public static final String NAME = "multi_match"; public static final MultiMatchQueryBuilder.Type DEFAULT_TYPE = MultiMatchQueryBuilder.Type.BEST_FIELDS; @@ -67,8 +65,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder blendedFields) { - return blendTerms(context, new BytesRef[] {value}, commonTermsCutoff, tieBreaker, lenient, blendedFields); + return blendTerms(context, new BytesRef[] {value}, tieBreaker, lenient, blendedFields); } - static Query blendTerms(QueryShardContext context, BytesRef[] values, Float commonTermsCutoff, float tieBreaker, + static Query blendTerms(QueryShardContext context, BytesRef[] values, float tieBreaker, boolean lenient, List blendedFields) { List queries = new ArrayList<>(); @@ -276,11 +276,7 @@ static Query blendTerms(QueryShardContext context, BytesRef[] values, Float comm if (i > 0) { terms = Arrays.copyOf(terms, i); blendedBoost = Arrays.copyOf(blendedBoost, i); - if (commonTermsCutoff != null) { - queries.add(BlendedTermQuery.commonTermsBlendedQuery(terms, blendedBoost, commonTermsCutoff)); - } else { - queries.add(BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker)); - } + queries.add(BlendedTermQuery.dismaxBlendedQuery(terms, blendedBoost, tieBreaker)); } if (queries.size() == 1) { return queries.get(0); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java index d47f0d9d7fdfd..332c049e13337 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java @@ -34,8 +34,6 @@ import java.nio.channels.FileChannel; import java.nio.file.Path; -import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; - /** * Each translog file is started with a translog header then followed by translog operations. 
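The query-DSL removals earlier in this patch — CommonTermsQueryBuilder, the cutoff_frequency option on match and multi_match, and the common-terms blending path in MultiMatchQuery — all steer users to a plain match query, which, per the deprecation messages being deleted, can skip blocks of documents efficiently when the total hit count is not tracked. A hedged migration sketch using QueryBuilders, with placeholder field and text values:

    // Before (removed by this change):
    //   QueryBuilders.commonTermsQuery("body", "the quick brown fox").cutoffFrequency(0.01f);
    // After: an unconfigured match query achieves the same effect.
    QueryBuilder replacement = QueryBuilders.matchQuery("body", "the quick brown fox");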
*/ @@ -123,6 +121,9 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil if (version == VERSION_CHECKSUMS) { throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); } + if (version == VERSION_CHECKPOINTS) { + throw new IllegalStateException("pre-6.3 translog found [" + path + "]"); + } // Read the translogUUID final int uuidLen = in.readInt(); if (uuidLen > channel.size()) { @@ -141,17 +142,10 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil " this translog file belongs to a different translog"); } // Read the primary term - final long primaryTerm; - if (version == VERSION_PRIMARY_TERM) { - primaryTerm = in.readLong(); - } else { - assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]"; - primaryTerm = UNASSIGNED_PRIMARY_TERM; - } + assert version == VERSION_PRIMARY_TERM; + final long primaryTerm = in.readLong(); // Verify the checksum - if (version >= VERSION_PRIMARY_TERM) { - Translog.verifyChecksum(in); - } + Translog.verifyChecksum(in); assert primaryTerm >= 0 : "Primary term must be non-negative [" + primaryTerm + "]; translog path [" + path + "]"; final int headerSizeInBytes = headerSizeInBytes(version, uuid.length); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java index d9d6bbcfee98d..99c8598106843 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeAction.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.settings.Settings; @@ -29,8 +29,6 @@ import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -64,106 +62,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - - AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); - try (XContentParser parser = request.contentOrSourceParamParser()) { - buildFromContent(parser, analyzeRequest); - } catch (IOException e) { - throw new IllegalArgumentException("Failed to parse request body", e); + AnalyzeAction.Request analyzeRequest = AnalyzeAction.Request.fromXContent(parser, request.param("index")); + return channel -> client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<>(channel)); } - - return channel -> client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<>(channel)); } - static void buildFromContent(XContentParser parser, AnalyzeRequest analyzeRequest) - throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException("Malformed content, must start with an object"); - } else { - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == 
XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (Fields.TEXT.match(currentFieldName, parser.getDeprecationHandler()) && - token == XContentParser.Token.VALUE_STRING) { - analyzeRequest.text(parser.text()); - } else if (Fields.TEXT.match(currentFieldName, parser.getDeprecationHandler()) && - token == XContentParser.Token.START_ARRAY) { - List texts = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token.isValue() == false) { - throw new IllegalArgumentException(currentFieldName + " array element should only contain text"); - } - texts.add(parser.text()); - } - analyzeRequest.text(texts.toArray(new String[texts.size()])); - } else if (Fields.ANALYZER.match(currentFieldName, parser.getDeprecationHandler()) - && token == XContentParser.Token.VALUE_STRING) { - analyzeRequest.analyzer(parser.text()); - } else if (Fields.FIELD.match(currentFieldName, parser.getDeprecationHandler()) && - token == XContentParser.Token.VALUE_STRING) { - analyzeRequest.field(parser.text()); - } else if (Fields.TOKENIZER.match(currentFieldName, parser.getDeprecationHandler())) { - if (token == XContentParser.Token.VALUE_STRING) { - analyzeRequest.tokenizer(parser.text()); - } else if (token == XContentParser.Token.START_OBJECT) { - analyzeRequest.tokenizer(parser.map()); - } else { - throw new IllegalArgumentException(currentFieldName + " should be tokenizer's name or setting"); - } - } else if (Fields.TOKEN_FILTERS.match(currentFieldName, parser.getDeprecationHandler()) - && token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - analyzeRequest.addTokenFilter(parser.text()); - } else if (token == XContentParser.Token.START_OBJECT) { - analyzeRequest.addTokenFilter(parser.map()); - } else { - throw new IllegalArgumentException(currentFieldName - + " array element should contain filter's name or setting"); - } - } - } else if (Fields.CHAR_FILTERS.match(currentFieldName, parser.getDeprecationHandler()) - && token == XContentParser.Token.START_ARRAY) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.VALUE_STRING) { - analyzeRequest.addCharFilter(parser.text()); - } else if (token == XContentParser.Token.START_OBJECT) { - analyzeRequest.addCharFilter(parser.map()); - } else { - throw new IllegalArgumentException(currentFieldName - + " array element should contain char filter's name or setting"); - } - } - } else if (Fields.EXPLAIN.match(currentFieldName, parser.getDeprecationHandler())) { - if (parser.isBooleanValue()) { - analyzeRequest.explain(parser.booleanValue()); - } else { - throw new IllegalArgumentException(currentFieldName + " must be either 'true' or 'false'"); - } - } else if (Fields.ATTRIBUTES.match(currentFieldName, parser.getDeprecationHandler()) && - token == XContentParser.Token.START_ARRAY) { - List attributes = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token.isValue() == false) { - throw new IllegalArgumentException(currentFieldName + " array element should only contain attribute name"); - } - attributes.add(parser.text()); - } - analyzeRequest.attributes(attributes.toArray(new String[attributes.size()])); - } else if (Fields.NORMALIZER.match(currentFieldName, parser.getDeprecationHandler())) { - if (token == XContentParser.Token.VALUE_STRING) { - 
analyzeRequest.normalizer(parser.text()); - } else { - throw new IllegalArgumentException(currentFieldName + " should be normalizer's name"); - } - } else { - throw new IllegalArgumentException("Unknown parameter [" - + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); - } - } - } - } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index b0447d5781dfc..3e36def038e44 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.BooleanQuery; import org.elasticsearch.common.NamedRegistry; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -34,7 +33,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; -import org.elasticsearch.index.query.CommonTermsQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.DisMaxQueryBuilder; import org.elasticsearch.index.query.DistanceFeatureQueryBuilder; @@ -272,7 +270,6 @@ import static java.util.Collections.unmodifiableMap; import static java.util.Objects.requireNonNull; -import static org.elasticsearch.index.query.CommonTermsQueryBuilder.COMMON_TERMS_QUERY_DEPRECATION_MSG; import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder; /** @@ -769,8 +766,6 @@ private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new, MoreLikeThisQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(WrapperQueryBuilder.NAME, WrapperQueryBuilder::new, WrapperQueryBuilder::fromXContent)); - registerQuery(new QuerySpec<>(new ParseField(CommonTermsQueryBuilder.NAME).withAllDeprecated(COMMON_TERMS_QUERY_DEPRECATION_MSG), - CommonTermsQueryBuilder::new, CommonTermsQueryBuilder::fromXContent)); registerQuery( new QuerySpec<>(SpanMultiTermQueryBuilder.NAME, SpanMultiTermQueryBuilder::new, SpanMultiTermQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(FunctionScoreQueryBuilder.NAME, FunctionScoreQueryBuilder::new, diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index eef9f4f42637c..7be8872ab5fac 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -933,34 +933,20 @@ public void onResponse(Void v) { if (countDown.countDown()) { final TcpChannel handshakeChannel = channels.get(0); try { - executeHandshake(node, handshakeChannel, connectionProfile, new ActionListener() { - @Override - public void onResponse(Version version) { - NodeChannels nodeChannels = new NodeChannels(node, channels, connectionProfile, version); - long relativeMillisTime = threadPool.relativeTimeInMillis(); - nodeChannels.channels.forEach(ch -> { - // Mark the channel init time - ch.getChannelStats().markAccessed(relativeMillisTime); - ch.addCloseListener(ActionListener.wrap(nodeChannels::close)); - }); - keepAlive.registerNodeConnection(nodeChannels.channels, connectionProfile); - 
listener.onResponse(nodeChannels); - } - - @Override - public void onFailure(Exception e) { - CloseableChannel.closeChannels(channels, false); - - if (e instanceof ConnectTransportException) { - listener.onFailure(e); - } else { - listener.onFailure(new ConnectTransportException(node, "general node connection failure", e)); - } - } - }); + executeHandshake(node, handshakeChannel, connectionProfile, ActionListener.wrap(version -> { + NodeChannels nodeChannels = new NodeChannels(node, channels, connectionProfile, version); + long relativeMillisTime = threadPool.relativeTimeInMillis(); + nodeChannels.channels.forEach(ch -> { + // Mark the channel init time + ch.getChannelStats().markAccessed(relativeMillisTime); + ch.addCloseListener(ActionListener.wrap(nodeChannels::close)); + }); + keepAlive.registerNodeConnection(nodeChannels.channels, connectionProfile); + listener.onResponse(nodeChannels); + }, e -> closeAndFail(e instanceof ConnectTransportException ? + e : new ConnectTransportException(node, "general node connection failure", e)))); } catch (Exception ex) { - CloseableChannel.closeChannels(channels, false); - listener.onFailure(ex); + closeAndFail(ex); } } } @@ -968,15 +954,23 @@ public void onFailure(Exception e) { @Override public void onFailure(Exception ex) { if (countDown.fastForward()) { - CloseableChannel.closeChannels(channels, false); - listener.onFailure(new ConnectTransportException(node, "connect_exception", ex)); + closeAndFail(new ConnectTransportException(node, "connect_exception", ex)); } } public void onTimeout() { if (countDown.fastForward()) { + closeAndFail(new ConnectTransportException(node, "connect_timeout[" + connectionProfile.getConnectTimeout() + "]")); + } + } + + private void closeAndFail(Exception e) { + try { CloseableChannel.closeChannels(channels, false); - listener.onFailure(new ConnectTransportException(node, "connect_timeout[" + connectionProfile.getConnectTimeout() + "]")); + } catch (Exception ex) { + e.addSuppressed(ex); + } finally { + listener.onFailure(e); } } } diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index 4df99ef6f8836..fbfa0f39b1691 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -47,7 +47,7 @@ grant codeBase "${codebase.lucene-misc}" { permission java.nio.file.LinkPermission "hard"; }; -grant codeBase "${codebase.plugin-classloader}" { +grant codeBase "${codebase.elasticsearch-plugin-classloader}" { // needed to create the classloader which allows plugins to extend other plugins permission java.lang.RuntimePermission "createClassLoader"; }; diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 4e4b04d1ff19c..3c24dc2d42b82 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -30,7 +30,6 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.CommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; @@ 
-147,21 +146,6 @@ public void testMultiPhrasePrefixQuery() throws Exception { BreakIterator.getSentenceInstance(Locale.ROOT), 0, outputs); } - public void testCommonTermsQuery() throws Exception { - final String[] inputs = { - "The quick brown fox." - }; - final String[] outputs = { - "The quick brown fox." - }; - CommonTermsQuery query = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128); - query.add(new Term("text", "quick")); - query.add(new Term("text", "brown")); - query.add(new Term("text", "fox")); - assertHighlightOneDoc("text", inputs, new StandardAnalyzer(), query, Locale.ROOT, - BreakIterator.getSentenceInstance(Locale.ROOT), 0, outputs); - } - public void testSentenceBoundedBreakIterator() throws Exception { final String[] inputs = { "The quick brown fox in a long sentence with another quick brown fox. " + diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index eb57964af4dc4..79d50fff2ac51 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -186,7 +186,7 @@ public void testMinCompatVersion() { // from 7.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is // released since we need to bump the supported minor in Version#minimumCompatibilityVersion() - Version lastVersion = Version.V_6_8_0; // TODO: remove this once min compat version is a constant instead of method + Version lastVersion = Version.fromString("6.8.0"); // TODO: remove this once min compat version is a constant instead of method assertEquals(lastVersion.major, Version.V_7_0_0.minimumCompatibilityVersion().major); assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()", lastVersion.minor, Version.V_7_0_0.minimumCompatibilityVersion().minor); @@ -345,7 +345,7 @@ public static void assertUnknownVersion(Version version) { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); - assertFalse(isCompatible(Version.fromString("6.7.0"), Version.fromString("7.0.0"))); + assertFalse(isCompatible(Version.V_7_0_0, Version.V_8_0_0)); assertTrue(isCompatible(Version.fromString("6.8.0"), Version.fromString("7.0.0"))); assertFalse(isCompatible(Version.fromId(2000099), Version.V_7_0_0)); assertFalse(isCompatible(Version.fromId(2000099), Version.fromString("6.5.0"))); diff --git a/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java index 2dbb52d547f97..cb5bad021eae9 100644 --- a/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/test/java/org/elasticsearch/action/IndicesRequestIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexAction; @@ -207,7 +206,7 @@ public void testAnalyze() { String analyzeShardAction = AnalyzeAction.NAME + "[s]"; interceptTransportActions(analyzeShardAction); - AnalyzeRequest analyzeRequest = new AnalyzeRequest(randomIndexOrAlias()); + 
AnalyzeAction.Request analyzeRequest = new AnalyzeAction.Request(randomIndexOrAlias()); analyzeRequest.text("text"); internalCluster().coordOnlyNodeClient().admin().indices().analyze(analyzeRequest).actionGet(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index b0c2e34c30620..1bef91623e885 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -22,14 +22,14 @@ import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.action.admin.indices.analyze.TransportAnalyzeAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractCharFilterFactory; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; @@ -53,6 +53,9 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** * Tests for {@link TransportAnalyzeAction}. See the rest tests in the {@code analysis-common} module for places where this code gets a ton @@ -131,25 +134,32 @@ public List getPreConfiguredCharFilters() { idxMaxTokenCount = idxSettings.getMaxTokenCount(); } + private IndexService mockIndexService() { + IndexService is = mock(IndexService.class); + when(is.getIndexAnalyzers()).thenReturn(indexAnalyzers); + return is; + } + /** * Test behavior when the named analysis component isn't defined on the index. In that case we should build with defaults. */ public void testNoIndexAnalyzers() throws IOException { // Refer to an analyzer by its type so we get its default configuration - AnalyzeRequest request = new AnalyzeRequest(); + AnalyzeAction.Request request = new AnalyzeAction.Request(); request.text("the quick brown fox"); request.analyzer("standard"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, null, registry, environment, maxTokenCount); - List tokens = analyze.getTokens(); + AnalyzeAction.Response analyze + = TransportAnalyzeAction.analyze(request, registry, environment, mockIndexService(), maxTokenCount); + List tokens = analyze.getTokens(); assertEquals(4, tokens.size()); // Refer to a token filter by its type so we get its default configuration - request = new AnalyzeRequest(); + request = new AnalyzeAction.Request(); request.text("the qu1ck brown fox"); request.tokenizer("standard"); request.addTokenFilter("mock"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, - maxTokenCount); + analyze + = TransportAnalyzeAction.analyze(request, registry, environment, randomBoolean() ? 
mockIndexService() : null, maxTokenCount); tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("qu1ck", tokens.get(0).getTerm()); @@ -157,12 +167,12 @@ public void testNoIndexAnalyzers() throws IOException { assertEquals("fox", tokens.get(2).getTerm()); // We can refer to a pre-configured token filter by its name to get it - request = new AnalyzeRequest(); + request = new AnalyzeAction.Request(); request.text("the qu1ck brown fox"); request.tokenizer("standard"); request.addCharFilter("append_foo"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, - maxTokenCount); + analyze + = TransportAnalyzeAction.analyze(request, registry, environment, randomBoolean() ? mockIndexService() : null, maxTokenCount); tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); @@ -171,13 +181,13 @@ public void testNoIndexAnalyzers() throws IOException { assertEquals("foxfoo", tokens.get(3).getTerm()); // We can refer to a token filter by its type to get its default configuration - request = new AnalyzeRequest(); + request = new AnalyzeAction.Request(); request.text("the qu1ck brown fox"); request.tokenizer("standard"); request.addCharFilter("append"); request.text("the qu1ck brown fox"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, - maxTokenCount); + analyze + = TransportAnalyzeAction.analyze(request, registry, environment, randomBoolean() ? mockIndexService() : null, maxTokenCount); tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); @@ -187,11 +197,11 @@ public void testNoIndexAnalyzers() throws IOException { } public void testFillsAttributes() throws IOException { - AnalyzeRequest request = new AnalyzeRequest(); + AnalyzeAction.Request request = new AnalyzeAction.Request(); request.analyzer("standard"); request.text("the 1 brown fox"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, null, registry, environment, maxTokenCount); - List tokens = analyze.getTokens(); + AnalyzeAction.Response analyze = TransportAnalyzeAction.analyze(request, registry, environment, null, maxTokenCount); + List tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); assertEquals(0, tokens.get(0).getStartOffset()); @@ -219,19 +229,19 @@ public void testFillsAttributes() throws IOException { } public void testWithIndexAnalyzers() throws IOException { - AnalyzeRequest request = new AnalyzeRequest(); + AnalyzeAction.Request request = new AnalyzeAction.Request(); request.text("the quick brown fox"); request.analyzer("custom_analyzer"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, - maxTokenCount); - List tokens = analyze.getTokens(); + AnalyzeAction.Response analyze + = TransportAnalyzeAction.analyze(request, registry, environment, mockIndexService(), maxTokenCount); + List tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("quick", tokens.get(0).getTerm()); assertEquals("brown", tokens.get(1).getTerm()); assertEquals("fox", tokens.get(2).getTerm()); request.analyzer("standard"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount); + analyze = TransportAnalyzeAction.analyze(request, registry, 
environment, mockIndexService(), maxTokenCount); tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); @@ -242,7 +252,7 @@ public void testWithIndexAnalyzers() throws IOException { // Switch the analyzer out for just a tokenizer request.analyzer(null); request.tokenizer("standard"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount); + analyze = TransportAnalyzeAction.analyze(request, registry, environment, mockIndexService(), maxTokenCount); tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); @@ -252,7 +262,7 @@ public void testWithIndexAnalyzers() throws IOException { // Now try applying our token filter request.addTokenFilter("mock"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount); + analyze = TransportAnalyzeAction.analyze(request, registry, environment, mockIndexService(), maxTokenCount); tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("quick", tokens.get(0).getTerm()); @@ -260,24 +270,32 @@ public void testWithIndexAnalyzers() throws IOException { assertEquals("fox", tokens.get(2).getTerm()); } - public void testGetIndexAnalyserWithoutIndexAnalyzers() throws IOException { + public void testGetIndexAnalyserWithoutIndexAnalyzers() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> TransportAnalyzeAction.analyze( - new AnalyzeRequest() + new AnalyzeAction.Request() .analyzer("custom_analyzer") .text("the qu1ck brown fox-dog"), - "text", null, null, registry, environment, maxTokenCount)); + registry, environment, null, maxTokenCount)); assertEquals(e.getMessage(), "failed to find global analyzer [custom_analyzer]"); } - public void testUnknown() throws IOException { + public void testGetFieldAnalyzerWithoutIndexAnalyzers() { + AnalyzeAction.Request req = new AnalyzeAction.Request().field("field").text("text"); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + TransportAnalyzeAction.analyze(req, registry, environment, null, maxTokenCount); + }); + assertEquals(e.getMessage(), "analysis based on a specific field requires an index"); + } + + public void testUnknown() { boolean notGlobal = randomBoolean(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> TransportAnalyzeAction.analyze( - new AnalyzeRequest() + new AnalyzeAction.Request() .analyzer("foobar") .text("the qu1ck brown fox"), - "text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount)); + registry, environment, notGlobal ? mockIndexService() : null, maxTokenCount)); if (notGlobal) { assertEquals(e.getMessage(), "failed to find analyzer [foobar]"); } else { @@ -286,10 +304,10 @@ public void testUnknown() throws IOException { e = expectThrows(IllegalArgumentException.class, () -> TransportAnalyzeAction.analyze( - new AnalyzeRequest() + new AnalyzeAction.Request() .tokenizer("foobar") .text("the qu1ck brown fox"), - "text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount)); + registry, environment, notGlobal ? 
mockIndexService() : null, maxTokenCount)); if (notGlobal) { assertEquals(e.getMessage(), "failed to find tokenizer under [foobar]"); } else { @@ -298,11 +316,11 @@ public void testUnknown() throws IOException { e = expectThrows(IllegalArgumentException.class, () -> TransportAnalyzeAction.analyze( - new AnalyzeRequest() + new AnalyzeAction.Request() .tokenizer("standard") .addTokenFilter("foobar") .text("the qu1ck brown fox"), - "text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount)); + registry, environment, notGlobal ? mockIndexService() : null, maxTokenCount)); if (notGlobal) { assertEquals(e.getMessage(), "failed to find token filter under [foobar]"); } else { @@ -311,12 +329,12 @@ public void testUnknown() throws IOException { e = expectThrows(IllegalArgumentException.class, () -> TransportAnalyzeAction.analyze( - new AnalyzeRequest() + new AnalyzeAction.Request() .tokenizer("standard") .addTokenFilter("lowercase") .addCharFilter("foobar") .text("the qu1ck brown fox"), - "text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount)); + registry, environment, notGlobal ? mockIndexService() : null, maxTokenCount)); if (notGlobal) { assertEquals(e.getMessage(), "failed to find char filter under [foobar]"); } else { @@ -325,21 +343,21 @@ public void testUnknown() throws IOException { e = expectThrows(IllegalArgumentException.class, () -> TransportAnalyzeAction.analyze( - new AnalyzeRequest() + new AnalyzeAction.Request() .normalizer("foobar") .text("the qu1ck brown fox"), - "text", null, indexAnalyzers, registry, environment, maxTokenCount)); + registry, environment, mockIndexService(), maxTokenCount)); assertEquals(e.getMessage(), "failed to find normalizer under [foobar]"); } public void testNonPreBuildTokenFilter() throws IOException { - AnalyzeRequest request = new AnalyzeRequest(); + AnalyzeAction.Request request = new AnalyzeAction.Request(); request.tokenizer("standard"); request.addTokenFilter("stop"); // stop token filter is not prebuilt in AnalysisModule#setupPreConfiguredTokenFilters() request.text("the quick brown fox"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, - maxTokenCount); - List tokens = analyze.getTokens(); + AnalyzeAction.Response analyze + = TransportAnalyzeAction.analyze(request, registry, environment, mockIndexService(), maxTokenCount); + List tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("quick", tokens.get(0).getTerm()); assertEquals("brown", tokens.get(1).getTerm()); @@ -347,12 +365,12 @@ public void testNonPreBuildTokenFilter() throws IOException { } public void testNormalizerWithIndex() throws IOException { - AnalyzeRequest request = new AnalyzeRequest("index"); + AnalyzeAction.Request request = new AnalyzeAction.Request("index"); request.normalizer("my_normalizer"); request.text("ABc"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, - maxTokenCount); - List tokens = analyze.getTokens(); + AnalyzeAction.Response analyze + = TransportAnalyzeAction.analyze(request, registry, environment, mockIndexService(), maxTokenCount); + List tokens = analyze.getTokens(); assertEquals(1, tokens.size()); assertEquals("abc", tokens.get(0).getTerm()); @@ -362,7 +380,7 @@ public void testNormalizerWithIndex() throws IOException { * This test is equivalent of calling _analyze without a specific index. 
* The default value for the maximum token count is used. */ - public void testExceedDefaultMaxTokenLimit() throws IOException{ + public void testExceedDefaultMaxTokenLimit() { // create a string with No. words more than maxTokenCount StringBuilder sbText = new StringBuilder(); for (int i = 0; i <= maxTokenCount; i++){ @@ -372,23 +390,21 @@ public void testExceedDefaultMaxTokenLimit() throws IOException{ String text = sbText.toString(); // request with explain=false to test simpleAnalyze path in TransportAnalyzeAction - AnalyzeRequest request = new AnalyzeRequest(); + AnalyzeAction.Request request = new AnalyzeAction.Request(); request.text(text); request.analyzer("standard"); IllegalStateException e = expectThrows(IllegalStateException.class, - () -> TransportAnalyzeAction.analyze( - request, "text", null, null, registry, environment, maxTokenCount)); + () -> TransportAnalyzeAction.analyze(request, registry, environment, null, maxTokenCount)); assertEquals(e.getMessage(), "The number of tokens produced by calling _analyze has exceeded the allowed maximum of [" + maxTokenCount + "]." + " This limit can be set by changing the [index.analyze.max_token_count] index level setting."); // request with explain=true to test detailAnalyze path in TransportAnalyzeAction - AnalyzeRequest request2 = new AnalyzeRequest(); + AnalyzeAction.Request request2 = new AnalyzeAction.Request(); request2.text(text); request2.analyzer("standard"); request2.explain(true); IllegalStateException e2 = expectThrows(IllegalStateException.class, - () -> TransportAnalyzeAction.analyze( - request2, "text", null, null, registry, environment, maxTokenCount)); + () -> TransportAnalyzeAction.analyze(request2, registry, environment, null, maxTokenCount)); assertEquals(e2.getMessage(), "The number of tokens produced by calling _analyze has exceeded the allowed maximum of [" + maxTokenCount + "]." + " This limit can be set by changing the [index.analyze.max_token_count] index level setting."); } @@ -397,7 +413,7 @@ public void testExceedDefaultMaxTokenLimit() throws IOException{ * This test is equivalent of calling _analyze against a specific index. * The index specific value for the maximum token count is used. */ - public void testExceedSetMaxTokenLimit() throws IOException{ + public void testExceedSetMaxTokenLimit() { // create a string with No. words more than idxMaxTokenCount StringBuilder sbText = new StringBuilder(); for (int i = 0; i <= idxMaxTokenCount; i++){ @@ -406,12 +422,11 @@ public void testExceedSetMaxTokenLimit() throws IOException{ } String text = sbText.toString(); - AnalyzeRequest request = new AnalyzeRequest(); + AnalyzeAction.Request request = new AnalyzeAction.Request(); request.text(text); request.analyzer("standard"); IllegalStateException e = expectThrows(IllegalStateException.class, - () -> TransportAnalyzeAction.analyze( - request, "text", null, indexAnalyzers, registry, environment, idxMaxTokenCount)); + () -> TransportAnalyzeAction.analyze(request, registry, environment, null, idxMaxTokenCount)); assertEquals(e.getMessage(), "The number of tokens produced by calling _analyze has exceeded the allowed maximum of [" + idxMaxTokenCount + "]." 
+ " This limit can be set by changing the [index.analyze.max_token_count] index level setting."); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestTests.java index d83b2fae0f917..2c4c9212fb53a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestTests.java @@ -26,11 +26,12 @@ import java.io.IOException; +import static org.hamcrest.CoreMatchers.containsString; public class AnalyzeRequestTests extends ESTestCase { - public void testValidation() throws Exception { - AnalyzeRequest request = new AnalyzeRequest(); + public void testValidation() { + AnalyzeAction.Request request = new AnalyzeAction.Request(); ActionRequestValidationException e = request.validate(); assertNotNull("text validation should fail", e); @@ -60,16 +61,45 @@ public void testValidation() throws Exception { e = request.validate(); assertTrue(e.getMessage().contains("tokenizer/analyze should be null if normalizer is specified")); - AnalyzeRequest requestAnalyzer = new AnalyzeRequest("index"); + AnalyzeAction.Request requestAnalyzer = new AnalyzeAction.Request("index"); requestAnalyzer.normalizer("some normalizer"); requestAnalyzer.text("something"); requestAnalyzer.analyzer("analyzer"); e = requestAnalyzer.validate(); assertTrue(e.getMessage().contains("tokenizer/analyze should be null if normalizer is specified")); + + { + AnalyzeAction.Request analyzerPlusDefs = new AnalyzeAction.Request("index"); + analyzerPlusDefs.text("text"); + analyzerPlusDefs.analyzer("analyzer"); + analyzerPlusDefs.addTokenFilter("tokenfilter"); + e = analyzerPlusDefs.validate(); + assertNotNull(e); + assertThat(e.getMessage(), containsString("cannot define extra components on a named analyzer")); + } + + { + AnalyzeAction.Request analyzerPlusDefs = new AnalyzeAction.Request("index"); + analyzerPlusDefs.text("text"); + analyzerPlusDefs.normalizer("normalizer"); + analyzerPlusDefs.addTokenFilter("tokenfilter"); + e = analyzerPlusDefs.validate(); + assertNotNull(e); + assertThat(e.getMessage(), containsString("cannot define extra components on a named normalizer")); + } + { + AnalyzeAction.Request analyzerPlusDefs = new AnalyzeAction.Request("index"); + analyzerPlusDefs.text("text"); + analyzerPlusDefs.field("field"); + analyzerPlusDefs.addTokenFilter("tokenfilter"); + e = analyzerPlusDefs.validate(); + assertNotNull(e); + assertThat(e.getMessage(), containsString("cannot define extra components on a field-specific analyzer")); + } } public void testSerialization() throws IOException { - AnalyzeRequest request = new AnalyzeRequest("foo"); + AnalyzeAction.Request request = new AnalyzeAction.Request("foo"); request.text("a", "b"); request.tokenizer("tokenizer"); request.addTokenFilter("tokenfilter"); @@ -79,7 +109,7 @@ public void testSerialization() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { request.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - AnalyzeRequest serialized = new AnalyzeRequest(); + AnalyzeAction.Request serialized = new AnalyzeAction.Request(); serialized.readFrom(in); assertArrayEquals(request.text(), serialized.text()); assertEquals(request.tokenizer().name, serialized.tokenizer().name); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java index a4cee7a4cde2a..95fc010f37f86 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponseTests.java @@ -20,124 +20,35 @@ package org.elasticsearch.action.admin.indices.analyze; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Predicate; import static org.hamcrest.Matchers.equalTo; -public class AnalyzeResponseTests extends AbstractSerializingTestCase { - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - return s -> s.contains("tokens."); - } - - @Override - protected AnalyzeResponse doParseInstance(XContentParser parser) throws IOException { - return AnalyzeResponse.fromXContent(parser); - } - - @Override - protected Writeable.Reader instanceReader() { - return AnalyzeResponse::new; - } - - @Override - protected AnalyzeResponse createTestInstance() { - int tokenCount = randomIntBetween(1, 30); - AnalyzeResponse.AnalyzeToken[] tokens = new AnalyzeResponse.AnalyzeToken[tokenCount]; - for (int i = 0; i < tokenCount; i++) { - tokens[i] = randomToken(); - } - if (randomBoolean()) { - DetailAnalyzeResponse.CharFilteredText[] charfilters = null; - DetailAnalyzeResponse.AnalyzeTokenList[] tokenfilters = null; - if (randomBoolean()) { - charfilters = new DetailAnalyzeResponse.CharFilteredText[]{ - new DetailAnalyzeResponse.CharFilteredText("my_charfilter", new String[]{"one two"}) - }; - } - if (randomBoolean()) { - tokenfilters = new DetailAnalyzeResponse.AnalyzeTokenList[]{ - new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenfilter_1", tokens), - new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenfilter_2", tokens) - }; - } - DetailAnalyzeResponse dar = new DetailAnalyzeResponse( - charfilters, - new DetailAnalyzeResponse.AnalyzeTokenList("my_tokenizer", tokens), - tokenfilters); - return new AnalyzeResponse(null, dar); - } - return new AnalyzeResponse(Arrays.asList(tokens), null); - } - - private AnalyzeResponse.AnalyzeToken randomToken() { - String token = randomAlphaOfLengthBetween(1, 20); - int position = randomIntBetween(0, 1000); - int startOffset = randomIntBetween(0, 1000); - int endOffset = randomIntBetween(0, 1000); - int posLength = randomIntBetween(1, 5); - String type = randomAlphaOfLengthBetween(1, 20); - Map extras = new HashMap<>(); - if (randomBoolean()) { - int entryCount = randomInt(6); - for (int i = 0; i < entryCount; i++) { - switch (randomInt(6)) { - case 0: - case 1: - case 2: - case 3: - String key = randomAlphaOfLength(5); - String value = randomAlphaOfLength(10); - extras.put(key, value); - break; - case 4: - String objkey = randomAlphaOfLength(5); - Map obj = new HashMap<>(); - obj.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); - extras.put(objkey, obj); - break; - case 5: - String listkey 
= randomAlphaOfLength(5); - List list = new ArrayList<>(); - list.add(randomAlphaOfLength(4)); - list.add(randomAlphaOfLength(6)); - extras.put(listkey, list); - break; - } - } - } - return new AnalyzeResponse.AnalyzeToken(token, position, startOffset, endOffset, posLength, type, extras); - } +public class AnalyzeResponseTests extends ESTestCase { + @SuppressWarnings("unchecked") public void testNullResponseToXContent() throws IOException { - DetailAnalyzeResponse.CharFilteredText[] charfilters = null; + AnalyzeAction.CharFilteredText[] charfilters = null; String name = "test_tokens_null"; - AnalyzeResponse.AnalyzeToken[] tokens = null; - DetailAnalyzeResponse.AnalyzeTokenList tokenizer = null; + AnalyzeAction.AnalyzeToken[] tokens = null; + AnalyzeAction.AnalyzeTokenList tokenizer = null; - DetailAnalyzeResponse.AnalyzeTokenList tokenfiltersItem = new DetailAnalyzeResponse.AnalyzeTokenList(name, tokens); - DetailAnalyzeResponse.AnalyzeTokenList[] tokenfilters = {tokenfiltersItem}; + AnalyzeAction.AnalyzeTokenList tokenfiltersItem = new AnalyzeAction.AnalyzeTokenList(name, tokens); + AnalyzeAction.AnalyzeTokenList[] tokenfilters = {tokenfiltersItem}; - DetailAnalyzeResponse detail = new DetailAnalyzeResponse(charfilters, tokenizer, tokenfilters); + AnalyzeAction.DetailAnalyzeResponse detail = new AnalyzeAction.DetailAnalyzeResponse(charfilters, tokenizer, tokenfilters); - AnalyzeResponse response = new AnalyzeResponse(null, detail); + AnalyzeAction.Response response = new AnalyzeAction.Response(null, detail); try (XContentBuilder builder = JsonXContent.contentBuilder()) { response.toXContent(builder, ToXContent.EMPTY_PARAMS); Map converted = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java index 51d3ecc89afc7..9cb435762aa19 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/exists/IndicesExistsIT.java @@ -30,7 +30,7 @@ import java.io.IOException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, autoMinMasterNodes = false) +@ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0, autoManageMasterNodes = false) public class IndicesExistsIT extends ESIntegTestCase { public void testIndexExistsWithBlocksInPlace() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index b570ec8f781a6..3f3e20d95d328 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -412,16 +413,29 @@ public void testUseDefaultPipelineWithAlias() 
throws Exception { } public void testUseDefaultPipelineWithBulkUpsert() throws Exception { + String indexRequestName = randomFrom(new String[]{null, WITH_DEFAULT_PIPELINE, WITH_DEFAULT_PIPELINE_ALIAS}); + validatePipelineWithBulkUpsert(indexRequestName, WITH_DEFAULT_PIPELINE); + } + + public void testUseDefaultPipelineWithBulkUpsertWithAlias() throws Exception { + String indexRequestName = randomFrom(new String[]{null, WITH_DEFAULT_PIPELINE, WITH_DEFAULT_PIPELINE_ALIAS}); + validatePipelineWithBulkUpsert(indexRequestName, WITH_DEFAULT_PIPELINE_ALIAS); + } + + private void validatePipelineWithBulkUpsert(@Nullable String indexRequestIndexName, String updateRequestIndexName) throws Exception { Exception exception = new Exception("fake exception"); BulkRequest bulkRequest = new BulkRequest(); - IndexRequest indexRequest1 = new IndexRequest(WITH_DEFAULT_PIPELINE, "type", "id1").source(Collections.emptyMap()); - IndexRequest indexRequest2 = new IndexRequest(WITH_DEFAULT_PIPELINE, "type", "id2").source(Collections.emptyMap()); - IndexRequest indexRequest3 = new IndexRequest(WITH_DEFAULT_PIPELINE, "type", "id3").source(Collections.emptyMap()); - UpdateRequest upsertRequest = new UpdateRequest(WITH_DEFAULT_PIPELINE, "type", "id1").upsert(indexRequest1).script(mockScript("1")); - UpdateRequest docAsUpsertRequest = new UpdateRequest(WITH_DEFAULT_PIPELINE, "type", "id2").doc(indexRequest2).docAsUpsert(true); + IndexRequest indexRequest1 = new IndexRequest(indexRequestIndexName, "type", "id1").source(Collections.emptyMap()); + IndexRequest indexRequest2 = new IndexRequest(indexRequestIndexName, "type", "id2").source(Collections.emptyMap()); + IndexRequest indexRequest3 = new IndexRequest(indexRequestIndexName, "type", "id3").source(Collections.emptyMap()); + UpdateRequest upsertRequest = new UpdateRequest(updateRequestIndexName, "type", "id1") + .upsert(indexRequest1).script(mockScript("1")); + UpdateRequest docAsUpsertRequest = new UpdateRequest(updateRequestIndexName, "type", "id2") + .doc(indexRequest2).docAsUpsert(true); // this test only covers the mechanics that scripted bulk upserts will execute a default pipeline. However, in practice scripted // bulk upserts with a default pipeline are a bit surprising since the script executes AFTER the pipeline. 
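A minimal sketch of the scripted bulk-upsert shape these hunks parameterise, assuming a hypothetical index name that carries a default ingest pipeline; it reuses only the request-builder calls shown in the hunk above, with a plain Script standing in for the test framework's mockScript helper:

import java.util.Collections;

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.script.Script;

public class ScriptedBulkUpsertSketch {
    // "index-with-default-pipeline", "type" and "id1" are placeholder values.
    static BulkRequest example() {
        IndexRequest upsertDoc = new IndexRequest("index-with-default-pipeline", "type", "id1")
                .source(Collections.emptyMap());
        UpdateRequest scriptedUpsert = new UpdateRequest("index-with-default-pipeline", "type", "id1")
                .upsert(upsertDoc)                       // upsert document goes through the index's default pipeline
                .script(new Script("ctx._source.n = 1")) // per the note above, the script runs only AFTER the pipeline
                .scriptedUpsert(true);
        return new BulkRequest().add(scriptedUpsert);
    }
}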
- UpdateRequest scriptedUpsert = new UpdateRequest(WITH_DEFAULT_PIPELINE, "type", "id2").upsert(indexRequest3).script(mockScript("1")) + UpdateRequest scriptedUpsert = new UpdateRequest(updateRequestIndexName, "type", "id2") + .upsert(indexRequest3).script(mockScript("1")) .scriptedUpsert(true); bulkRequest.add(upsertRequest).add(docAsUpsertRequest).add(scriptedUpsert); diff --git a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 1317183f286b3..45d8f4c8c0bf0 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class IndexingMasterFailoverIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index cb1443bdf3765..164c74423aa7f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -62,7 +62,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class MinimumMasterNodesIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 38b9579eff046..7e4c1c5c3435e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -39,7 +39,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) @TestLogging("_root:DEBUG,org.elasticsearch.action.admin.cluster.state:TRACE") public class SpecificMasterNodesIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java index b8168ce3de4a7..0225ffe8fe6cd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinTaskExecutorTests.java @@ -54,8 +54,7 @@ public void testPreventJoinClusterWithUnsupportedIndices() { Settings.builder().build(); MetaData.Builder metaBuilder = MetaData.builder(); IndexMetaData indexMetaData = IndexMetaData.builder("test") - .settings(settings(VersionUtils.getPreviousVersion(Version.CURRENT - .minimumIndexCompatibilityVersion()))) + 
.settings(settings(Version.fromString("6.8.0"))) // latest V6 released version .numberOfShards(1) .numberOfReplicas(1).build(); metaBuilder.put(indexMetaData, false); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 44f4d7bf4aa53..cb7a6916bb68d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -52,7 +52,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.cluster.coordination:TRACE") public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 3de716acfee44..cda92d3953ea3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -433,12 +433,6 @@ public void testCalculateNumRoutingShards() { assertEquals(2048, MetaDataCreateIndexService.calculateNumRoutingShards(1024, Version.CURRENT)); assertEquals(4096, MetaDataCreateIndexService.calculateNumRoutingShards(2048, Version.CURRENT)); - Version latestV6 = VersionUtils.getPreviousVersion(Version.V_7_0_0); - int numShards = randomIntBetween(1, 1000); - assertEquals(numShards, MetaDataCreateIndexService.calculateNumRoutingShards(numShards, latestV6)); - assertEquals(numShards, MetaDataCreateIndexService.calculateNumRoutingShards(numShards, - VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), latestV6))); - for (int i = 0; i < 1000; i++) { int randomNumShards = randomIntBetween(1, 10000); int numRoutingShards = MetaDataCreateIndexService.calculateNumRoutingShards(randomNumShards, Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 1b90ea691c1cd..ead77e71bf816 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -50,7 +50,7 @@ numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false, - autoMinMasterNodes = false) + autoManageMasterNodes = false) public class SingleNodeDiscoveryIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java index 86976d553fa2a..84188f80aaed3 100644 --- a/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesIT.java @@ -34,7 +34,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; 
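The same autoMinMasterNodes to autoManageMasterNodes rename recurs across the integration-test files in this diff; a minimal sketch of the renamed attribute on a hypothetical test class (only the annotation mirrors this diff, the rest is illustrative):

import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;

@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false)
public class ExampleManualMasterIT extends ESIntegTestCase {
    public void testStartsItsOwnNodes() {
        // with autoManageMasterNodes = false the test, rather than the framework, is
        // responsible for starting and bootstrapping master-eligible nodes, e.g. via
        // internalCluster().startMasterOnlyNode()
    }
}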
-@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class RecoverAfterNodesIT extends ESIntegTestCase { private static final TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10); diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java deleted file mode 100644 index d02b60c52d531..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryBuilderTests.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; -import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.nullValue; - -public class CommonTermsQueryBuilderTests extends AbstractQueryTestCase { - - @Override - protected CommonTermsQueryBuilder doCreateTestQueryBuilder() { - int numberOfTerms = randomIntBetween(0, 10); - StringBuilder text = new StringBuilder(""); - for (int i = 0; i < numberOfTerms; i++) { - text.append(randomAlphaOfLengthBetween(1, 10)).append(" "); - } - - String fieldName = randomFrom(STRING_FIELD_NAME, - STRING_ALIAS_FIELD_NAME, - randomAlphaOfLengthBetween(1, 10)); - CommonTermsQueryBuilder query = new CommonTermsQueryBuilder(fieldName, text.toString()); - - if (randomBoolean()) { - query.cutoffFrequency(randomIntBetween(1, 10)); - } - - if (randomBoolean()) { - query.lowFreqOperator(randomFrom(Operator.values())); - } - - // number of low frequency terms that must match - if (randomBoolean()) { - query.lowFreqMinimumShouldMatch("" + randomIntBetween(1, 5)); - } - - if (randomBoolean()) { - query.highFreqOperator(randomFrom(Operator.values())); - } - - // number of high frequency terms that must match - if (randomBoolean()) { - query.highFreqMinimumShouldMatch("" + randomIntBetween(1, 5)); - } - - if (randomBoolean()) { - query.analyzer(randomAnalyzer()); - } - - return query; - } - - @Override - protected Map getAlternateVersions() { - Map alternateVersions = new HashMap<>(); - 
CommonTermsQueryBuilder commonTermsQuery = new CommonTermsQueryBuilder(randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10)); - String contentString = "{\n" + - " \"common\" : {\n" + - " \"" + commonTermsQuery.fieldName() + "\" : \"" + commonTermsQuery.value() + "\"\n" + - " }\n" + - "}"; - alternateVersions.put(contentString, commonTermsQuery); - return alternateVersions; - } - - @Override - protected void doAssertLuceneQuery(CommonTermsQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { - assertThat(query, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery extendedCommonTermsQuery = (ExtendedCommonTermsQuery) query; - - List terms = extendedCommonTermsQuery.getTerms(); - if (!terms.isEmpty()) { - String expectedFieldName = expectedFieldName(queryBuilder.fieldName()); - String actualFieldName = terms.iterator().next().field(); - assertThat(actualFieldName, equalTo(expectedFieldName)); - } - - assertThat(extendedCommonTermsQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo(queryBuilder.highFreqMinimumShouldMatch())); - assertThat(extendedCommonTermsQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo(queryBuilder.lowFreqMinimumShouldMatch())); - } - - @Override - public void testUnknownField() throws IOException { - super.testUnknownField(); - assertDeprecationWarning(); - } - - @Override - public void testUnknownObjectException() throws IOException { - super.testUnknownObjectException(); - assertDeprecationWarning(); - } - - @Override - public void testFromXContent() throws IOException { - super.testFromXContent(); - assertDeprecationWarning(); - } - - @Override - public void testValidOutput() throws IOException { - super.testValidOutput(); - assertDeprecationWarning(); - } - - public void testIllegalArguments() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder(null, "text")); - assertEquals("field name is null or empty", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder("", "text")); - assertEquals("field name is null or empty", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, () -> new CommonTermsQueryBuilder("fieldName", null)); - assertEquals("text cannot be null", e.getMessage()); - } - - public void testFromJson() throws IOException { - String query = - "{\n" + - " \"common\" : {\n" + - " \"body\" : {\n" + - " \"query\" : \"nelly the elephant not as a cartoon\",\n" + - " \"high_freq_operator\" : \"AND\",\n" + - " \"low_freq_operator\" : \"OR\",\n" + - " \"cutoff_frequency\" : 0.001,\n" + - " \"minimum_should_match\" : {\n" + - " \"low_freq\" : \"2\",\n" + - " \"high_freq\" : \"3\"\n" + - " },\n" + - " \"boost\" : 42.0\n" + - " }\n" + - " }\n" + - "}"; - - CommonTermsQueryBuilder queryBuilder = (CommonTermsQueryBuilder) parseQuery(query); - checkGeneratedJson(query, queryBuilder); - - assertEquals(query, 42, queryBuilder.boost, 0.00001); - assertEquals(query, 0.001, queryBuilder.cutoffFrequency(), 0.0001); - assertEquals(query, Operator.OR, queryBuilder.lowFreqOperator()); - assertEquals(query, Operator.AND, queryBuilder.highFreqOperator()); - assertEquals(query, "nelly the elephant not as a cartoon", queryBuilder.value()); - - assertDeprecationWarning(); - } - - public void testCommonTermsQuery1() throws IOException { - String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query1.json"); - Query parsedQuery = 
parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; - assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue()); - assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2")); - - assertDeprecationWarning(); - } - - public void testCommonTermsQuery2() throws IOException { - String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query2.json"); - Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; - assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo("50%")); - assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("5<20%")); - - assertDeprecationWarning(); - } - - public void testCommonTermsQuery3() throws IOException { - String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query3.json"); - Query parsedQuery = parseQuery(query).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery; - assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue()); - assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2")); - - assertDeprecationWarning(); - } - - // see #11730 - public void testCommonTermsQuery4() throws IOException { - Query parsedQuery = parseQuery(commonTermsQuery("field", "text")).toQuery(createShardContext()); - assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class)); - - assertDeprecationWarning(); - } - - public void testParseFailsWithMultipleFields() throws IOException { - String json = "{\n" + - " \"common\" : {\n" + - " \"message1\" : {\n" + - " \"query\" : \"nelly the elephant not as a cartoon\"\n" + - " },\n" + - " \"message2\" : {\n" + - " \"query\" : \"nelly the elephant not as a cartoon\"\n" + - " }\n" + - " }\n" + - "}"; - - ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); - assertEquals("[common] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); - - String shortJson = "{\n" + - " \"common\" : {\n" + - " \"message1\" : \"nelly the elephant not as a cartoon\",\n" + - " \"message2\" : \"nelly the elephant not as a cartoon\"\n" + - " }\n" + - "}"; - e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); - assertEquals("[common] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); - - assertDeprecationWarning(); - } - - private void assertDeprecationWarning() { - assertWarnings("Deprecated field [common] used, replaced by [" + CommonTermsQueryBuilder.COMMON_TERMS_QUERY_DEPRECATION_MSG + "]"); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java deleted file mode 100644 index f393683a10f7f..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/query/CommonTermsQueryParserTests.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.test.ESSingleNodeTestCase; - -public class CommonTermsQueryParserTests extends ESSingleNodeTestCase { - public void testWhenParsedQueryIsNullNoNullPointerExceptionIsThrown() { - final String index = "test-index"; - final String type = "test-type"; - client() - .admin() - .indices() - .prepareCreate(index) - .addMapping(type, "name", "type=text,analyzer=stop") - .execute() - .actionGet(); - ensureGreen(); - - CommonTermsQueryBuilder commonTermsQueryBuilder = - new CommonTermsQueryBuilder("name", "the").queryName("query-name"); - - // the named query parses to null; we are testing this does not cause a NullPointerException - SearchResponse response = - client().prepareSearch(index).setQuery(commonTermsQueryBuilder).execute().actionGet(); - - assertNotNull(response); - assertEquals(response.getHits().getHits().length, 0); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java index 15ec8af0af2c5..69464edb51332 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalBuilderTests.java @@ -110,6 +110,12 @@ public void testPhraseWithStopword() throws IOException { } + public void testEmptyTokenStream() throws IOException { + CannedTokenStream ts = new CannedTokenStream(); + IntervalsSource source = BUILDER.analyzeText(new CachingTokenFilter(ts), 0, true); + assertSame(IntervalBuilder.NO_INTERVALS, source); + } + public void testSimpleSynonyms() throws IOException { CannedTokenStream ts = new CannedTokenStream( diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index f79bbb86242d9..76ea5aa9dc6a0 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.CannedBinaryTokenStream; import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FuzzyQuery; @@ -181,18 +180,6 @@ protected void doAssertLuceneQuery(MatchQueryBuilder queryBuilder, Query query, } } - if (query instanceof ExtendedCommonTermsQuery) { - assertTrue(queryBuilder.cutoffFrequency() != null); - ExtendedCommonTermsQuery ectq = (ExtendedCommonTermsQuery) query; - List terms = ectq.getTerms(); - if (!terms.isEmpty()) { - Term term = terms.iterator().next(); - String expectedFieldName = 
expectedFieldName(queryBuilder.fieldName()); - assertThat(term.field(), equalTo(expectedFieldName)); - } - assertEquals(queryBuilder.cutoffFrequency(), ectq.getMaxTermFrequency(), Float.MIN_VALUE); - } - if (query instanceof FuzzyQuery) { assertTrue(queryBuilder.fuzziness() != null); FuzzyQuery fuzzyQuery = (FuzzyQuery) query; diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 970a4c3a37ecb..cd77a940a80a1 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -171,7 +170,6 @@ protected void doAssertLuceneQuery(MultiMatchQueryBuilder queryBuilder, Query qu instanceOf(FuzzyQuery.class), instanceOf(MultiPhrasePrefixQuery.class), instanceOf(MatchAllDocsQuery.class), - instanceOf(ExtendedCommonTermsQuery.class), instanceOf(MatchNoDocsQuery.class), instanceOf(PhraseQuery.class), instanceOf(PointRangeQuery.class), diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java index 58baadd83573d..30438c49998ab 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java @@ -129,7 +129,7 @@ public void testBlendTerms() { Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -145,7 +145,7 @@ public void testBlendTermsWithFieldBoosts() { Query expected = BlendedTermQuery.dismaxBlendedQuery(terms, boosts, 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -167,7 +167,7 @@ public Query termQuery(Object value, QueryShardContext context) { ), 1f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, true, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, true, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } @@ -181,7 +181,7 @@ public Query termQuery(Object value, QueryShardContext context) { ft.setName("bar"); expectThrows(IllegalArgumentException.class, () -> MultiMatchQuery.blendTerm( 
indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft, 1)))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft, 1)))); } public void testBlendNoTermQuery() { @@ -205,7 +205,7 @@ public Query termQuery(Object value, QueryShardContext context) { ), 1.0f); Query actual = MultiMatchQuery.blendTerm( indexService.newQueryShardContext(randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null), - new BytesRef("baz"), null, 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); + new BytesRef("baz"), 1f, false, Arrays.asList(new FieldAndBoost(ft1, 2), new FieldAndBoost(ft2, 3))); assertEquals(expected, actual); } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java index 7d06e25519ac9..c3ad511783b39 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -19,16 +19,10 @@ package org.elasticsearch.index.translog; -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.OutputStreamDataOutput; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; -import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.nio.file.Files; import java.nio.file.Path; @@ -36,7 +30,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.lessThan; public class TranslogHeaderTests extends ESTestCase { @@ -72,40 +65,10 @@ public void testCurrentHeaderVersion() throws Exception { }); } - public void testHeaderWithoutPrimaryTerm() throws Exception { - final String translogUUID = UUIDs.randomBase64UUID(); - final long generation = randomNonNegativeLong(); - final Path translogFile = createTempDir().resolve(Translog.getFilename(generation)); - try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { - writeHeaderWithoutTerm(channel, translogUUID); - assertThat((int)channel.position(), lessThan(TranslogHeader.headerSizeInBytes(translogUUID))); - } - try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { - final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel); - assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID)); - assertThat(inHeader.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); - assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position())); - } - expectThrows(TranslogCorruptedException.class, () -> { - try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { - TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel); - } - }); - } - - static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException { - final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel)); - CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, 
TranslogHeader.VERSION_CHECKPOINTS); - final BytesRef uuid = new BytesRef(translogUUID); - out.writeInt(uuid.length); - out.writeBytes(uuid.bytes, uuid.offset, uuid.length); - channel.force(true); - assertThat(channel.position(), equalTo(43L)); - } - - public void testLegacyTranslogVersions() throws Exception { + public void testLegacyTranslogVersions() { checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", IllegalStateException.class, "pre-1.4 translog"); checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", IllegalStateException.class, "pre-2.0 translog"); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v2.binary", IllegalStateException.class, "pre-6.3 translog"); checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", IllegalStateException.class, "pre-2.0 translog"); checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary", TranslogCorruptedException.class, "translog looks like version 1 or later, but has corrupted header"); diff --git a/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index 4511c59c6b3f0..10a1ffe5c7b5e 100644 --- a/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -19,8 +19,8 @@ package org.elasticsearch.indices.analyze; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -53,9 +53,9 @@ public void testSimpleAnalyzerTests() throws Exception { ensureGreen(); for (int i = 0; i < 10; i++) { - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "this is a test").get(); + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "this is a test").get(); assertThat(analyzeResponse.getTokens().size(), equalTo(4)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0); + AnalyzeAction.AnalyzeToken token = analyzeResponse.getTokens().get(0); assertThat(token.getTerm(), equalTo("this")); assertThat(token.getStartOffset(), equalTo(0)); assertThat(token.getEndOffset(), equalTo(4)); @@ -94,7 +94,7 @@ public void testAnalyzeNumericField() throws IOException { } public void testAnalyzeWithNoIndex() throws Exception { - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setAnalyzer("simple").get(); + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setAnalyzer("simple").get(); assertThat(analyzeResponse.getTokens().size(), equalTo(4)); analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("keyword").addTokenFilter("lowercase") @@ -105,7 +105,7 @@ public void testAnalyzeWithNoIndex() throws Exception { analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("standard").addTokenFilter("lowercase") .get(); assertThat(analyzeResponse.getTokens().size(), equalTo(4)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0); + 
AnalyzeAction.AnalyzeToken token = analyzeResponse.getTokens().get(0); assertThat(token.getTerm(), equalTo("this")); token = analyzeResponse.getTokens().get(1); assertThat(token.getTerm(), equalTo("is")); @@ -134,9 +134,9 @@ public void testAnalyzerWithFieldOrTypeTests() throws Exception { final AnalyzeRequestBuilder requestBuilder = client().admin().indices().prepareAnalyze("THIS IS A TEST"); requestBuilder.setIndex(indexOrAlias()); requestBuilder.setField("document.simple"); - AnalyzeResponse analyzeResponse = requestBuilder.get(); + AnalyzeAction.Response analyzeResponse = requestBuilder.get(); assertThat(analyzeResponse.getTokens().size(), equalTo(4)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(3); + AnalyzeAction.AnalyzeToken token = analyzeResponse.getTokens().get(3); assertThat(token.getTerm(), equalTo("test")); assertThat(token.getStartOffset(), equalTo(10)); assertThat(token.getEndOffset(), equalTo(14)); @@ -146,7 +146,7 @@ public void testAnalyzerWithFieldOrTypeTests() throws Exception { // issue #5974 public void testThatStandardAndDefaultAnalyzersAreSame() throws Exception { - AnalyzeResponse response = client().admin().indices().prepareAnalyze("this is a test").setAnalyzer("standard").get(); + AnalyzeAction.Response response = client().admin().indices().prepareAnalyze("this is a test").setAnalyzer("standard").get(); assertTokens(response, "this", "is", "a", "test"); response = client().admin().indices().prepareAnalyze("this is a test").setAnalyzer("default").get(); @@ -156,7 +156,7 @@ public void testThatStandardAndDefaultAnalyzersAreSame() throws Exception { assertTokens(response, "this", "is", "a", "test"); } - private void assertTokens(AnalyzeResponse response, String ... tokens) { + private void assertTokens(AnalyzeAction.Response response, String ... 
tokens) { assertThat(response.getTokens(), hasSize(tokens.length)); for (int i = 0; i < tokens.length; i++) { assertThat(response.getTokens().get(i).getTerm(), is(tokens[i])); @@ -180,9 +180,9 @@ public void testAnalyzerWithMultiValues() throws Exception { requestBuilder.setText(texts); requestBuilder.setIndex(indexOrAlias()); requestBuilder.setField("simple"); - AnalyzeResponse analyzeResponse = requestBuilder.get(); + AnalyzeAction.Response analyzeResponse = requestBuilder.get(); assertThat(analyzeResponse.getTokens().size(), equalTo(7)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(3); + AnalyzeAction.AnalyzeToken token = analyzeResponse.getTokens().get(3); assertThat(token.getTerm(), equalTo("test")); assertThat(token.getPosition(), equalTo(3)); assertThat(token.getStartOffset(), equalTo(10)); @@ -199,7 +199,7 @@ public void testAnalyzerWithMultiValues() throws Exception { public void testDetailAnalyzeWithNoIndex() throws Exception { //analyzer only - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST") + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST") .setExplain(true).setAnalyzer("simple").get(); assertThat(analyzeResponse.detail().tokenizer(), IsNull.nullValue()); @@ -211,7 +211,7 @@ public void testDetailAnalyzeWithNoIndex() throws Exception { public void testDetailAnalyzeCustomAnalyzerWithNoIndex() throws Exception { //analyzer only - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST") + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST") .setExplain(true).setAnalyzer("simple").get(); assertThat(analyzeResponse.detail().tokenizer(), IsNull.nullValue()); @@ -257,12 +257,12 @@ public void testDetailAnalyzeWithMultiValues() throws Exception { .setType("document").setSource("simple", "type=text,analyzer=simple,position_increment_gap=100").get(); String[] texts = new String[]{"THIS IS A TEST", "THE SECOND TEXT"}; - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze().setIndex(indexOrAlias()).setText(texts) + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze().setIndex(indexOrAlias()).setText(texts) .setExplain(true).setField("simple").setText(texts).execute().get(); assertThat(analyzeResponse.detail().analyzer().getName(), equalTo("simple")); assertThat(analyzeResponse.detail().analyzer().getTokens().length, equalTo(7)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.detail().analyzer().getTokens()[3]; + AnalyzeAction.AnalyzeToken token = analyzeResponse.detail().analyzer().getTokens()[3]; assertThat(token.getTerm(), equalTo("test")); assertThat(token.getPosition(), equalTo(3)); @@ -292,7 +292,7 @@ public void testCustomTokenFilterInRequest() throws Exception { Map stopFilterSettings = new HashMap<>(); stopFilterSettings.put("type", "stop"); stopFilterSettings.put("stopwords", new String[]{"foo", "buzz"}); - AnalyzeResponse analyzeResponse = client().admin().indices() + AnalyzeAction.Response analyzeResponse = client().admin().indices() .prepareAnalyze() .setText("Foo buzz test") .setTokenizer("standard") @@ -359,9 +359,9 @@ public void testAnalyzeKeywordField() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "keyword", "type=keyword")); ensureGreen("test"); - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), 
"ABC").setField("keyword").get(); + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "ABC").setField("keyword").get(); assertThat(analyzeResponse.getTokens().size(), equalTo(1)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0); + AnalyzeAction.AnalyzeToken token = analyzeResponse.getTokens().get(0); assertThat(token.getTerm(), equalTo("ABC")); assertThat(token.getStartOffset(), equalTo(0)); assertThat(token.getEndOffset(), equalTo(3)); @@ -377,9 +377,9 @@ public void testAnalyzeNormalizedKeywordField() throws IOException { .addMapping("test", "keyword", "type=keyword,normalizer=my_normalizer")); ensureGreen("test"); - AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "ABC").setField("keyword").get(); + AnalyzeAction.Response analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "ABC").setField("keyword").get(); assertThat(analyzeResponse.getTokens().size(), equalTo(1)); - AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0); + AnalyzeAction.AnalyzeToken token = analyzeResponse.getTokens().get(0); assertThat(token.getTerm(), equalTo("abc")); assertThat(token.getStartOffset(), equalTo(0)); assertThat(token.getEndOffset(), equalTo(3)); diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index b9459b926d372..deafe203ac8dc 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -171,7 +171,15 @@ public void testDesktopServicesStoreFiles() throws IOException { if (Constants.WINDOWS) { assertThat(e.getCause(), instanceOf(NoSuchFileException.class)); } else { - assertThat(e.getCause(), hasToString(containsString("Not a directory"))); + // force a "Not a directory" exception to be thrown so that we can extract the locale-dependent message + final String expected; + try (InputStream ignored = Files.newInputStream(desktopServicesStore.resolve("not-a-directory"))) { + throw new AssertionError(); + } catch (final FileSystemException inner) { + // locale-dependent translation of "Not a directory" + expected = inner.getReason(); + } + assertThat(e.getCause(), hasToString(containsString(expected))); } } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java index 406e9b1d36c07..1cd79b3ae0c47 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; +import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; @@ -29,9 +29,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -import static 
org.hamcrest.Matchers.startsWith; import static org.mockito.Mockito.mock; public class RestAnalyzeActionTests extends ESTestCase { @@ -44,15 +46,13 @@ public void testParseXContentForAnalyzeRequest() throws Exception { .array("filter", "lowercase") .endObject())) { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - - RestAnalyzeAction.buildFromContent(content, analyzeRequest); + AnalyzeAction.Request analyzeRequest = AnalyzeAction.Request.fromXContent(content, "for test"); assertThat(analyzeRequest.text().length, equalTo(1)); assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"})); assertThat(analyzeRequest.tokenizer().name, equalTo("keyword")); assertThat(analyzeRequest.tokenFilters().size(), equalTo(1)); - for (AnalyzeRequest.NameOrDefinition filter : analyzeRequest.tokenFilters()) { + for (AnalyzeAction.Request.NameOrDefinition filter : analyzeRequest.tokenFilters()) { assertThat(filter.name, equalTo("lowercase")); } } @@ -79,9 +79,7 @@ public void testParseXContentForAnalyzeRequestWithCustomFilters() throws Excepti .field("normalizer", "normalizer") .endObject())) { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); - - RestAnalyzeAction.buildFromContent(content, analyzeRequest); + AnalyzeAction.Request analyzeRequest = AnalyzeAction.Request.fromXContent(content, "for test"); assertThat(analyzeRequest.text().length, equalTo(1)); assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"})); @@ -95,48 +93,45 @@ public void testParseXContentForAnalyzeRequestWithCustomFilters() throws Excepti } } - public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception { + public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() { RestAnalyzeAction action = new RestAnalyzeAction(Settings.EMPTY, mock(RestController.class)); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) .withContent(new BytesArray("{invalid_json}"), XContentType.JSON).build(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, null, null)); - assertThat(e.getMessage(), equalTo("Failed to parse request body")); + IOException e = expectThrows(IOException.class, () -> action.handleRequest(request, null, null)); + assertThat(e.getMessage(), containsString("expecting double-quote")); } public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); try (XContentParser invalidContent = createParser(XContentFactory.jsonBuilder() .startObject() .field("text", "THIS IS A TEST") .field("unknown", "keyword") .endObject())) { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest)); - assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); + () -> AnalyzeAction.Request.fromXContent(invalidContent, "for test")); + assertThat(e.getMessage(), containsString("unknown field [unknown]")); } } public void testParseXContentForAnalyzeRequestWithInvalidStringExplainParamThrowsException() throws Exception { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); try (XContentParser invalidExplain = createParser(XContentFactory.jsonBuilder() .startObject() .field("explain", "fals") .endObject())) { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> RestAnalyzeAction.buildFromContent(invalidExplain, 
analyzeRequest)); - assertThat(e.getMessage(), startsWith("explain must be either 'true' or 'false'")); + () -> AnalyzeAction.Request.fromXContent(invalidExplain, "for test")); + assertThat(e.getMessage(), containsString("failed to parse field [explain]")); } } public void testParseXContentForAnalyzeRequestWithInvalidNormalizerThrowsException() throws Exception { - AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test"); try (XContentParser invalidExplain = createParser(XContentFactory.jsonBuilder() .startObject() .field("normalizer", true) .endObject())) { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> RestAnalyzeAction.buildFromContent(invalidExplain, analyzeRequest)); - assertThat(e.getMessage(), startsWith("normalizer should be normalizer's name")); + () -> AnalyzeAction.Request.fromXContent(invalidExplain, "for test")); + assertThat(e.getMessage(), containsString("normalizer doesn't support values of type: VALUE_BOOLEAN")); } } @@ -147,9 +142,9 @@ public void testDeprecatedParamIn2xException() throws Exception { .field("tokenizer", "keyword") .array("filters", "lowercase") .endObject())) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"))); - assertThat(e.getMessage(), startsWith("Unknown parameter [filters]")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalyzeAction.Request.fromXContent(parser,"for test")); + assertThat(e.getMessage(), containsString("unknown field [filters]")); } try (XContentParser parser = createParser(XContentFactory.jsonBuilder() @@ -158,9 +153,9 @@ public void testDeprecatedParamIn2xException() throws Exception { .field("tokenizer", "keyword") .array("token_filters", "lowercase") .endObject())) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"))); - assertThat(e.getMessage(), startsWith("Unknown parameter [token_filters]")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalyzeAction.Request.fromXContent(parser, "for test")); + assertThat(e.getMessage(), containsString("unknown field [token_filters]")); } try (XContentParser parser = createParser(XContentFactory.jsonBuilder() @@ -169,9 +164,9 @@ public void testDeprecatedParamIn2xException() throws Exception { .field("tokenizer", "keyword") .array("char_filters", "lowercase") .endObject())) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"))); - assertThat(e.getMessage(), startsWith("Unknown parameter [char_filters]")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalyzeAction.Request.fromXContent(parser, "for test")); + assertThat(e.getMessage(), containsString("unknown field [char_filters]")); } try (XContentParser parser = createParser(XContentFactory.jsonBuilder() @@ -180,9 +175,9 @@ public void testDeprecatedParamIn2xException() throws Exception { .field("tokenizer", "keyword") .array("token_filter", "lowercase") .endObject())) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestAnalyzeAction.buildFromContent(parser, - new AnalyzeRequest("for test"))); - assertThat(e.getMessage(), startsWith("Unknown parameter [token_filter]")); + IllegalArgumentException e = 
expectThrows(IllegalArgumentException.class, + () -> AnalyzeAction.Request.fromXContent(parser, "for test")); + assertThat(e.getMessage(), containsString("unknown field [token_filter]")); } } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 2e019d1e2c432..512c80d5c0ef8 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -354,7 +354,7 @@ public List> getRescorers() { }; //add here deprecated queries to make sure we log a deprecation warnings when they are used - private static final String[] DEPRECATED_QUERIES = new String[] {"common"}; + private static final String[] DEPRECATED_QUERIES = new String[] {}; /** * Dummy test {@link AggregationBuilder} used to test registering aggregation builders. diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index f5e601fd97abd..855eb7286010c 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -81,7 +80,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.boostingQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery; @@ -1440,41 +1438,6 @@ public void testBoostingQueryTermVector() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsQuery() { - createIndex("test"); - ensureGreen(); - - client().prepareIndex("test", "type1") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog") - .get(); - refresh(); - - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource() - .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - } - - public void testCommonTermsTermVector() throws IOException { - assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); - ensureGreen(); - - client().prepareIndex("test", "type1").setSource( - "field1", "this is a test", - "field2", "The quick brown fox jumps over the lazy dog").get(); - refresh(); - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = 
searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); - - SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); - - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); - } - public void testPlainHighlightDifferentFragmenter() throws Exception { assertAcked(prepareCreate("test") .addMapping("type1", "tags", "type=text")); @@ -2295,24 +2258,6 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); } - public void testPostingsHighlighterCommonTermsQuery() throws IOException { - assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping())); - ensureGreen(); - - client().prepareIndex("test", "type1") - .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get(); - refresh(); - - logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) - .highlighter(highlight().field("field2").preTags("").postTags("")); - SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHitCount(searchResponse, 1L); - - assertHighlight(searchResponse, 0, "field2", 0, 1, - equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); - } - private static XContentBuilder type1PostingsffsetsMapping() throws IOException { return XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 5fec898155487..415fa40ea9db8 100644 --- a/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.query; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -72,8 +71,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; public class MultiMatchQueryIT extends ESIntegTestCase { @@ -303,66 +300,6 @@ public void testSingleField() throws NoSuchFieldException, IllegalAccessExceptio } - public void testCutoffFreq() throws ExecutionException, InterruptedException { - final long numDocs = client().prepareSearch("test").setSize(0) - .setQuery(matchAllQuery()).get().getHits().getTotalHits().value; - MatchQuery.Type type = MatchQuery.Type.BOOLEAN; - Float cutoffFrequency = randomBoolean() ? 
Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20); - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).cutoffFrequency(cutoffFrequency))).get(); - Set topNIds = Sets.newHashSet("theone", "theother"); - for (int i = 0; i < searchResponse.getHits().getHits().length; i++) { - topNIds.remove(searchResponse.getHits().getAt(i).getId()); - // very likely that we hit a random doc that has the same score so orders are random since - // the doc id is the tie-breaker - } - assertThat(topNIds, empty()); - assertThat(searchResponse.getHits().getHits()[0].getScore(), - greaterThanOrEqualTo(searchResponse.getHits().getHits()[1].getScore())); - - cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20); - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore())); - long size = searchResponse.getHits().getTotalHits().value; - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).type(type))).get(); - assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother"))); - assertThat("common terms expected to be a way smaller result set", size, lessThan(searchResponse.getHits().getTotalHits().value)); - - cutoffFrequency = randomBoolean() ? 
Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20); - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category") - .operator(Operator.OR).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertFirstHit(searchResponse, hasId("theother")); - - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.AND).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") - .operator(Operator.AND).cutoffFrequency(cutoffFrequency).type(type))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theone")); - - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("marvel hero", "first_name", "last_name", "category") - .operator(Operator.AND).cutoffFrequency(cutoffFrequency) - .analyzer("category") - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("theother")); - } - public void testEquivalence() { final int numDocs = (int) client().prepareSearch("test").setSize(0) @@ -559,21 +496,11 @@ public void testCrossFieldMode() throws ExecutionException, InterruptedException .analyzer("category"))).get(); assertFirstHit(searchResponse, hasId("theone")); - searchResponse = client().prepareSearch("test") - .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category") - .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) - .cutoffFrequency(0.1f) - .analyzer("category") - .operator(Operator.OR))).get(); - assertFirstHit(searchResponse, anyOf(hasId("theother"), hasId("theone"))); - long numResults = searchResponse.getHits().getTotalHits().value; - searchResponse = client().prepareSearch("test") .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category") .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS) .analyzer("category") .operator(Operator.OR))).get(); - assertThat(numResults, lessThan(searchResponse.getHits().getTotalHits().value)); assertFirstHit(searchResponse, hasId("theone")); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 2a7eb10313c51..afba40e2cb752 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -68,7 +68,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; -import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; @@ -101,7 +100,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; 
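
For reference, a minimal sketch — not part of this patch — of the plain `match`/`multi_match` form that the surviving tests keep using once the `common` terms query and the `cutoff_frequency` option are gone. It assumes the static imports (QueryBuilders.matchQuery, QueryBuilders.multiMatchQuery, Operator) and the "test" index already present in these integration tests; field names are placeholders.

    // illustrative only: same search, expressed without cutoff_frequency
    SearchResponse single = client().prepareSearch("test")
            .setQuery(matchQuery("field1", "the quick brown").operator(Operator.OR))
            .get();

    // multi-field variant; hit counts depend on the documents indexed by the test
    SearchResponse multi = client().prepareSearch("test")
            .setQuery(multiMatchQuery("the quick brown", "field1", "field2").operator(Operator.AND))
            .get();
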
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.hamcrest.Matchers.closeTo; @@ -270,97 +268,6 @@ public void testAllDocsQueryString() throws InterruptedException, ExecutionExcep } } - public void testCommonTermsQuery() throws Exception { - - client().admin().indices().prepareCreate("test") - .addMapping("type1", "field1", "type=text,analyzer=whitespace") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)).get(); - indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", - "the quick lazy huge brown fox jumps over the tree"), - client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"), - client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") ); - - - SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3) - .lowFreqOperator(Operator.OR)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3) - .lowFreqOperator(Operator.AND)).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - - // Default - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1) - .highFreqMinimumShouldMatch("3")).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("2")); - assertSecondHit(searchResponse, hasId("1")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1) - .highFreqMinimumShouldMatch("4")).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - - // Default - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1)).get(); - assertHitCount(searchResponse, 1L); - assertFirstHit(searchResponse, hasId("2")); - - searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3) - .analyzer("stop")).get(); - assertHitCount(searchResponse, 3L); - // stop drops "the" since its a stopword - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("3")); - 
assertThirdHit(searchResponse, hasId("2")); - - // try the same with match query - searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3) - .operator(Operator.AND)).get(); - assertHitCount(searchResponse, 2L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - - searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3) - .operator(Operator.OR)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("2")); - assertThirdHit(searchResponse, hasId("3")); - - searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3) - .operator(Operator.AND).analyzer("stop")).get(); - assertHitCount(searchResponse, 3L); - // stop drops "the" since its a stopword - assertFirstHit(searchResponse, hasId("1")); - assertSecondHit(searchResponse, hasId("3")); - assertThirdHit(searchResponse, hasId("2")); - - // try the same with multi match query - searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3) - .operator(Operator.AND)).get(); - assertHitCount(searchResponse, 3L); - assertFirstHit(searchResponse, hasId("3")); - assertSecondHit(searchResponse, hasId("1")); - assertThirdHit(searchResponse, hasId("2")); - } - public void testQueryStringAnalyzedWildcard() throws Exception { createIndex("test"); diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index 5f730ad138f96..7691b3346d72f 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -189,7 +189,7 @@ public void testExplainFilteredAlias() { assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:value1")); } - public void testExplainWithRewriteValidateQuery() throws Exception { + public void testExplainWithRewriteValidateQuery() { client().admin().indices().prepareCreate("test") .addMapping("type1", "field", "type=text,analyzer=whitespace") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1)).get(); @@ -205,18 +205,6 @@ public void testExplainWithRewriteValidateQuery() throws Exception { assertExplanation(QueryBuilders.matchPhrasePrefixQuery("field", "ju"), containsString("field:jumps"), true); - // common terms queries - assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin field:huge field:brown"), true); - assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"), - containsString("field:brown"), true); - - // match queries with cutoff frequency - assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("+field:pidgin field:huge field:brown"), true); - assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"), - containsString("field:brown"), true); - // fuzzy queries assertExplanation(QueryBuilders.fuzzyQuery("field", "the").fuzziness(Fuzziness.fromEdits(2)), containsString("field:the (field:tree)^0.3333333"), true); @@ -233,7 +221,7 @@ public void testExplainWithRewriteValidateQuery() throws Exception { containsString("field:huge field:pidgin"), 
true); } - public void testExplainWithRewriteValidateQueryAllShards() throws Exception { + public void testExplainWithRewriteValidateQueryAllShards() { client().admin().indices().prepareCreate("test") .addMapping("type1", "field", "type=text,analyzer=whitespace") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get(); @@ -262,7 +250,7 @@ public void testExplainWithRewriteValidateQueryAllShards() throws Exception { ), true, true); } - public void testIrrelevantPropertiesBeforeQuery() throws IOException { + public void testIrrelevantPropertiesBeforeQuery() { createIndex("test"); ensureGreen(); refresh(); @@ -271,7 +259,7 @@ public void testIrrelevantPropertiesBeforeQuery() throws IOException { new BytesArray("{\"foo\": \"bar\", \"query\": {\"term\" : { \"user\" : \"kimchy\" }}}"))).get().isValid(), equalTo(false)); } - public void testIrrelevantPropertiesAfterQuery() throws IOException { + public void testIrrelevantPropertiesAfterQuery() { createIndex("test"); ensureGreen(); refresh(); @@ -311,7 +299,7 @@ private static void assertExplanations(QueryBuilder queryBuilder, } } - public void testExplainTermsQueryWithLookup() throws Exception { + public void testExplainTermsQueryWithLookup() { client().admin().indices().prepareCreate("twitter") .addMapping("_doc", "user", "type=integer", "followers", "type=integer") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get(); diff --git a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json b/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json deleted file mode 100644 index b2728dac09df4..0000000000000 --- a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query1.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "common" : { - "dogs" : { - "query" : "buck mia tom", - "cutoff_frequency" : 1, - "minimum_should_match" : { - "low_freq" : 2 - } - } - } -} diff --git a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json b/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json deleted file mode 100644 index aeb281bb7592a..0000000000000 --- a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query2.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "common" : { - "dogs" : { - "query" : "buck mia tom", - "minimum_should_match" : { - "high_freq" : "50%", - "low_freq" : "5<20%" - } - } - } -} diff --git a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json b/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json deleted file mode 100644 index f276209ffc7ed..0000000000000 --- a/server/src/test/resources/org/elasticsearch/index/query/commonTerms-query3.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "common" : { - "dogs" : { - "query" : "buck mia tom", - "cutoff_frequency" : 1, - "minimum_should_match" : 2 - } - } -} diff --git a/server/src/test/resources/org/elasticsearch/index/translog/translog-v2.binary b/server/src/test/resources/org/elasticsearch/index/translog/translog-v2.binary new file mode 100644 index 0000000000000..a61a02adee385 Binary files /dev/null and b/server/src/test/resources/org/elasticsearch/index/translog/translog-v2.binary differ diff --git a/settings.gradle b/settings.gradle index e0650f618ddf4..5408f23976f30 100644 --- a/settings.gradle +++ b/settings.gradle @@ -120,43 +120,47 @@ include projects.toArray(new String[0]) project(':build-tools').projectDir = new 
File(rootProject.projectDir, 'buildSrc') +project(":libs").children.each { libsProject -> + libsProject.name = "elasticsearch-${libsProject.name}" +} + if (isEclipse) { project(":server").projectDir = new File(rootProject.projectDir, 'server/src/main') project(":server").buildFileName = 'eclipse-build.gradle' project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') project(":server-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:core").projectDir = new File(rootProject.projectDir, 'libs/core/src/main') - project(":libs:core").buildFileName = 'eclipse-build.gradle' - project(":libs:core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test') - project(":libs:core-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:dissect").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/main') - project(":libs:dissect").buildFileName = 'eclipse-build.gradle' - project(":libs:dissect-tests").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/test') - project(":libs:dissect-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main') - project(":libs:nio").buildFileName = 'eclipse-build.gradle' - project(":libs:nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test') - project(":libs:nio-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:x-content").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/main') - project(":libs:x-content").buildFileName = 'eclipse-build.gradle' - project(":libs:x-content-tests").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/test') - project(":libs:x-content-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:secure-sm").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/main') - project(":libs:secure-sm").buildFileName = 'eclipse-build.gradle' - project(":libs:secure-sm-tests").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/test') - project(":libs:secure-sm-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:grok").projectDir = new File(rootProject.projectDir, 'libs/grok/src/main') - project(":libs:grok").buildFileName = 'eclipse-build.gradle' - project(":libs:grok-tests").projectDir = new File(rootProject.projectDir, 'libs/grok/src/test') - project(":libs:grok-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:geo").projectDir = new File(rootProject.projectDir, 'libs/geo/src/main') - project(":libs:geo").buildFileName = 'eclipse-build.gradle' - project(":libs:geo-tests").projectDir = new File(rootProject.projectDir, 'libs/geo/src/test') - project(":libs:geo-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:ssl-config").projectDir = new File(rootProject.projectDir, 'libs/ssl-config/src/main') - project(":libs:ssl-config").buildFileName = 'eclipse-build.gradle' - project(":libs:ssl-config-tests").projectDir = new File(rootProject.projectDir, 'libs/ssl-config/src/test') - project(":libs:ssl-config-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core").projectDir = new File(rootProject.projectDir, 'libs/core/src/main') + project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test') + project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' + 
project(":libs:elasticsearch-dissect").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/main') + project(":libs:elasticsearch-dissect").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-dissect-tests").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/test') + project(":libs:elasticsearch-dissect-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main') + project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test') + project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-x-content").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/main') + project(":libs:elasticsearch-x-content").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-x-content-tests").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/test') + project(":libs:elasticsearch-x-content-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-secure-sm").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/main') + project(":libs:elasticsearch-secure-sm").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-secure-sm-tests").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/test') + project(":libs:elasticsearch-secure-sm-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-grok").projectDir = new File(rootProject.projectDir, 'libs/grok/src/main') + project(":libs:elasticsearch-grok").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-grok-tests").projectDir = new File(rootProject.projectDir, 'libs/grok/src/test') + project(":libs:elasticsearch-grok-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-geo").projectDir = new File(rootProject.projectDir, 'libs/geo/src/main') + project(":libs:elasticsearch-geo").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-geo-tests").projectDir = new File(rootProject.projectDir, 'libs/geo/src/test') + project(":libs:elasticsearch-geo-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-ssl-config").projectDir = new File(rootProject.projectDir, 'libs/ssl-config/src/main') + project(":libs:elasticsearch-ssl-config").buildFileName = 'eclipse-build.gradle' + project(":libs:elasticsearch-ssl-config-tests").projectDir = new File(rootProject.projectDir, 'libs/ssl-config/src/test') + project(":libs:elasticsearch-ssl-config-tests").buildFileName = 'eclipse-build.gradle' project(":client:rest-high-level").projectDir = new File(rootProject.projectDir, 'client/rest-high-level/src/main') project(":client:rest-high-level").buildFileName = 'eclipse-build.gradle' project(":client:rest-high-level-tests").projectDir = new File(rootProject.projectDir, 'client/rest-high-level/src/test') @@ -175,7 +179,3 @@ if (extraProjects.exists()) { addSubProjects('', extraProjectDir) } } - -project(":libs:cli").name = 'elasticsearch-cli' -project(":libs:geo").name = 'elasticsearch-geo' -project(":libs:ssl-config").name = 'elasticsearch-ssl-config' diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 18978bd2d7560..9cabdb82bf36a 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -18,11 +18,11 @@ */ dependencies { - compile 
"org.elasticsearch.client:elasticsearch-rest-client:${version}" - compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" - compile "org.elasticsearch:elasticsearch-nio:${version}" - compile "org.elasticsearch:elasticsearch:${version}" - compile "org.elasticsearch:elasticsearch-cli:${version}" + compile project(":client:rest") + compile project(":client:sniffer") + compile project(':libs:elasticsearch-nio') + compile project(":server") + compile project(":libs:elasticsearch-cli") compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" compile "junit:junit:${versions.junit}" compile "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 0ab0afb6b0e7a..cff0c6b4cf2d7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1608,9 +1608,10 @@ public enum Scope { boolean supportsDedicatedMasters() default true; /** - * The cluster automatically manages the bootstrap voting configuration. Set this to false to manage the setting manually. + * Indicates whether the cluster automatically manages cluster bootstrapping and the removal of any master-eligible nodes. If + * set to {@code false} then the tests must manage these processes explicitly. */ - boolean autoMinMasterNodes() default true; + boolean autoManageMasterNodes() default true; /** * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a @@ -1698,9 +1699,9 @@ private boolean getSupportsDedicatedMasters() { return annotation == null ? true : annotation.supportsDedicatedMasters(); } - private boolean getAutoMinMasterNodes() { + private boolean getAutoManageMasterNodes() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); - return annotation == null ? true : annotation.autoMinMasterNodes(); + return annotation == null ? 
true : annotation.autoManageMasterNodes(); } private int getNumDataNodes() { @@ -1838,7 +1839,7 @@ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOExceptio } mockPlugins = mocks; } - return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, getAutoMinMasterNodes(), + return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, getAutoManageMasterNodes(), minNumDataNodes, maxNumDataNodes, InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), nodePrefix, mockPlugins, getClientWrapper(), forbidPrivateIndexSettings()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 695564690c4b1..4780bc4fba8bd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -227,7 +227,7 @@ public final class InternalTestCluster extends TestCluster { private final ExecutorService executor; - private final boolean autoManageMinMasterNodes; + private final boolean autoManageMasterNodes; private final Collection> mockPlugins; @@ -250,7 +250,7 @@ public InternalTestCluster( final long clusterSeed, final Path baseDir, final boolean randomlyAddDedicatedMasters, - final boolean autoManageMinMasterNodes, + final boolean autoManageMasterNodes, final int minNumDataNodes, final int maxNumDataNodes, final String clusterName, @@ -263,7 +263,7 @@ public InternalTestCluster( clusterSeed, baseDir, randomlyAddDedicatedMasters, - autoManageMinMasterNodes, + autoManageMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName, @@ -279,7 +279,7 @@ public InternalTestCluster( final long clusterSeed, final Path baseDir, final boolean randomlyAddDedicatedMasters, - final boolean autoManageMinMasterNodes, + final boolean autoManageMasterNodes, final int minNumDataNodes, final int maxNumDataNodes, final String clusterName, @@ -290,7 +290,7 @@ public InternalTestCluster( final Function clientWrapper, final boolean forbidPrivateIndexSettings) { super(clusterSeed); - this.autoManageMinMasterNodes = autoManageMinMasterNodes; + this.autoManageMasterNodes = autoManageMasterNodes; this.clientWrapper = clientWrapper; this.forbidPrivateIndexSettings = forbidPrivateIndexSettings; this.baseDir = baseDir; @@ -305,7 +305,7 @@ public InternalTestCluster( Random random = new Random(clusterSeed); - boolean useDedicatedMasterNodes = randomlyAddDedicatedMasters ? random.nextBoolean() : false; + boolean useDedicatedMasterNodes = randomlyAddDedicatedMasters && random.nextBoolean(); this.numSharedDataNodes = RandomNumbers.randomIntBetween(random, minNumDataNodes, maxNumDataNodes); assert this.numSharedDataNodes >= 0; @@ -345,10 +345,10 @@ public InternalTestCluster( } logger.info("Setup InternalTestCluster [{}] with seed [{}] using [{}] dedicated masters, " + - "[{}] (data) nodes and [{}] coord only nodes (min_master_nodes are [{}])", + "[{}] (data) nodes and [{}] coord only nodes (master nodes are [{}])", clusterName, SeedUtils.formatSeed(clusterSeed), numSharedDedicatedMasterNodes, numSharedDataNodes, numSharedCoordOnlyNodes, - autoManageMinMasterNodes ? "auto-managed" : "manual"); + autoManageMasterNodes ? "auto-managed" : "manual"); this.nodeConfigurationSource = nodeConfigurationSource; numDataPaths = random.nextInt(5) == 0 ? 
2 + random.nextInt(3) : 1; Builder builder = Settings.builder(); @@ -394,12 +394,11 @@ public InternalTestCluster( /** * Sets {@link #bootstrapMasterNodeIndex} to the given value, see {@link #bootstrapMasterNodeWithSpecifiedIndex(List)} * for the description of how this field is used. - * It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMinMasterNodes is false. + * It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMasterNodes is false. */ public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) { - if (autoManageMinMasterNodes && bootstrapMasterNodeIndex != -1) { - throw new AssertionError("bootstrapMasterNodeIndex should be -1 if autoManageMinMasterNodes is true"); - } + assert autoManageMasterNodes == false || bootstrapMasterNodeIndex == -1 + : "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was " + bootstrapMasterNodeIndex; this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex; } @@ -531,7 +530,7 @@ private NodeAndClient getOrBuildRandomNode() { final Runnable onTransportServiceStarted = () -> {}; // do not create unicast host file for this one node. final int nodeId = nextNodeId.getAndIncrement(); - final Settings settings = getNodeSettings(nodeId, random.nextLong(), Settings.EMPTY, 1); + final Settings settings = getNodeSettings(nodeId, random.nextLong(), Settings.EMPTY); final Settings nodeSettings = Settings.builder() .putList(INITIAL_MASTER_NODES_SETTING.getKey(), Node.NODE_NAME_SETTING.get(settings)) .put(settings) @@ -605,7 +604,7 @@ public synchronized void ensureAtMostNumDataNodes(int n) throws IOException { } } - private Settings getNodeSettings(final int nodeId, final long seed, final Settings extraSettings, final int defaultMinMasterNodes) { + private Settings getNodeSettings(final int nodeId, final long seed, final Settings extraSettings) { final Settings settings = getSettings(nodeId, seed, extraSettings); final String name = buildNodeName(nodeId, settings); @@ -629,8 +628,8 @@ private Settings getNodeSettings(final int nodeId, final long seed, final Settin updatedSettings.put("node.name", name); updatedSettings.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); - if (autoManageMinMasterNodes) { - assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting", + if (autoManageMasterNodes) { + assertThat("if master nodes are automatically managed then nodes must complete a join cycle when starting", updatedSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()), nullValue()); } @@ -810,8 +809,6 @@ public synchronized void close() throws IOException { } } - private static final int REMOVED_MINIMUM_MASTER_NODES = Integer.MAX_VALUE; - private final class NodeAndClient implements Closeable { private MockNode node; private final Settings originalNodeSettings; @@ -889,18 +886,16 @@ void startNode() { /** * closes the node and prepares it to be restarted */ - Settings closeForRestart(RestartCallback callback, int minMasterNodes) throws Exception { + Settings closeForRestart(RestartCallback callback) throws Exception { assert callback != null; close(); Settings callbackSettings = callback.onNodeStopped(name); assert callbackSettings != null; Settings.Builder newSettings = Settings.builder(); - newSettings.put(callbackSettings); - if (minMasterNodes >= 0) { - if (INITIAL_MASTER_NODES_SETTING.exists(callbackSettings) == false) { - newSettings.putList(INITIAL_MASTER_NODES_SETTING.getKey()); - } + if (autoManageMasterNodes) { + 
newSettings.putList(INITIAL_MASTER_NODES_SETTING.getKey()); } + newSettings.put(callbackSettings); // delete data folders now, before we start other nodes that may claim it clearDataIfNeeded(callback); return newSettings.build(); @@ -1026,9 +1021,7 @@ private synchronized void reset(boolean wipeData) throws IOException { // start any missing node assert newSize == numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; - final int numberOfMasterNodes = numSharedDedicatedMasterNodes > 0 ? numSharedDedicatedMasterNodes : numSharedDataNodes; - final int defaultMinMasterNodes = (numberOfMasterNodes / 2) + 1; - final List toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go due to min master nodes + final List toStartAndPublish = new ArrayList<>(); // we want to start nodes in one go final Runnable onTransportServiceStarted = () -> rebuildUnicastHostFiles(toStartAndPublish); final List settings = new ArrayList<>(); @@ -1037,7 +1030,7 @@ private synchronized void reset(boolean wipeData) throws IOException { final Settings.Builder extraSettings = Settings.builder(); extraSettings.put(Node.NODE_MASTER_SETTING.getKey(), true); extraSettings.put(Node.NODE_DATA_SETTING.getKey(), false); - settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); + settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build())); } for (int i = numSharedDedicatedMasterNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes; i++) { final Settings.Builder extraSettings = Settings.builder(); @@ -1046,13 +1039,13 @@ private synchronized void reset(boolean wipeData) throws IOException { extraSettings.put(Node.NODE_MASTER_SETTING.getKey(), false).build(); extraSettings.put(Node.NODE_DATA_SETTING.getKey(), true).build(); } - settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); + settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build())); } for (int i = numSharedDedicatedMasterNodes + numSharedDataNodes; i < numSharedDedicatedMasterNodes + numSharedDataNodes + numSharedCoordOnlyNodes; i++) { final Builder extraSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false) .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_INGEST_SETTING.getKey(), false); - settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build(), defaultMinMasterNodes)); + settings.add(getNodeSettings(i, sharedNodesSeeds[i], extraSettings.build())); } int autoBootstrapMasterNodeIndex = -1; @@ -1061,7 +1054,7 @@ private synchronized void reset(boolean wipeData) throws IOException { .map(Node.NODE_NAME_SETTING::get) .collect(Collectors.toList()); - if (prevNodeCount == 0 && autoManageMinMasterNodes) { + if (prevNodeCount == 0 && autoManageMasterNodes) { if (numSharedDedicatedMasterNodes > 0) { autoBootstrapMasterNodeIndex = RandomNumbers.randomIntBetween(random, 0, numSharedDedicatedMasterNodes - 1); } else if (numSharedDataNodes > 0) { @@ -1084,7 +1077,7 @@ private synchronized void reset(boolean wipeData) throws IOException { nextNodeId.set(newSize); assert size() == newSize; - if (autoManageMinMasterNodes && newSize > 0) { + if (autoManageMasterNodes && newSize > 0) { validateClusterFormed(); } logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", @@ -1469,7 +1462,7 @@ public InetSocketAddress[] httpAddresses() { for (HttpServerTransport httpServerTransport : getInstances(HttpServerTransport.class)) { 
addresses.add(httpServerTransport.boundAddress().publishAddress().address()); } - return addresses.toArray(new InetSocketAddress[addresses.size()]); + return addresses.toArray(new InetSocketAddress[0]); } /** @@ -1543,8 +1536,7 @@ private synchronized void startAndPublishNodesAndClients(List nod } nodeAndClients.forEach(this::publishNode); - if (autoManageMinMasterNodes && currentMasters > 0 && newMasters > 0 && - getMinMasterNodes(currentMasters + newMasters) > currentMasters) { + if (autoManageMasterNodes && newMasters > 0) { // update once masters have joined validateClusterFormed(); } @@ -1652,11 +1644,8 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); } - Set excludedNodeIds = excludeMasters(Collections.singleton(nodeAndClient)); - - final Settings newSettings = nodeAndClient.closeForRestart(callback, - autoManageMinMasterNodes ? getMinMasterNodes(getMasterNodesCount()) : -1); - + final Set excludedNodeIds = excludeMasters(Collections.singleton(nodeAndClient)); + final Settings newSettings = nodeAndClient.closeForRestart(callback); removeExclusions(excludedNodeIds); boolean success = false; @@ -1675,10 +1664,8 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) } if (callback.validateClusterForming() || excludedNodeIds.isEmpty() == false) { - // we have to validate cluster size if updateMinMaster == true, because we need the - // second node to join in order to increment min_master_nodes back to 2. - // we also have to do via the node that was just restarted as it may be that the master didn't yet process - // the fact it left + // we have to validate cluster size to ensure that the restarted node has rejoined the cluster if it was master-eligible; + // we have to do this via the node that was just restarted as it may be that the master didn't yet process the fact that it left validateClusterFormed(nodeAndClient.name); } } @@ -1694,7 +1681,7 @@ private NodeAndClient removeNode(NodeAndClient nodeAndClient) { private Set excludeMasters(Collection nodeAndClients) { assert Thread.holdsLock(this); final Set excludedNodeIds = new HashSet<>(); - if (autoManageMinMasterNodes && nodeAndClients.size() > 0) { + if (autoManageMasterNodes && nodeAndClients.size() > 0) { final long currentMasters = nodes.values().stream().filter(NodeAndClient::isMasterEligible).count(); final long stoppingMasters = nodeAndClients.stream().filter(NodeAndClient::isMasterEligible).count(); @@ -1741,7 +1728,6 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception final Settings[] newNodeSettings = new Settings[nextNodeId.get()]; Map, List> nodesByRoles = new HashMap<>(); Set[] rolesOrderedByOriginalStartupOrder = new Set[nextNodeId.get()]; - final int minMasterNodes = autoManageMinMasterNodes ? 
getMinMasterNodes(getMasterNodesCount()) : -1; for (NodeAndClient nodeAndClient : nodes.values()) { callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient()); logger.info("Stopping and resetting node [{}] ", nodeAndClient.name); @@ -1749,7 +1735,7 @@ public synchronized void fullRestart(RestartCallback callback) throws Exception activeDisruptionScheme.removeFromNode(nodeAndClient.name, this); } DiscoveryNode discoveryNode = getInstanceFromNode(ClusterService.class, nodeAndClient.node()).localNode(); - final Settings newSettings = nodeAndClient.closeForRestart(callback, minMasterNodes); + final Settings newSettings = nodeAndClient.closeForRestart(callback); newNodeSettings[nodeAndClient.nodeAndClientId()] = newSettings; rolesOrderedByOriginalStartupOrder[nodeAndClient.nodeAndClientId()] = discoveryNode.getRoles(); nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient); @@ -1934,25 +1920,18 @@ public List startNodes(int numOfNodes, Settings settings) { */ public synchronized List startNodes(Settings... extraSettings) { final int newMasterCount = Math.toIntExact(Stream.of(extraSettings).filter(Node.NODE_MASTER_SETTING::get).count()); - final int defaultMinMasterNodes; - if (autoManageMinMasterNodes) { - defaultMinMasterNodes = getMinMasterNodes(getMasterNodesCount() + newMasterCount); - } else { - defaultMinMasterNodes = -1; - } final List nodes = new ArrayList<>(); final int prevMasterCount = getMasterNodesCount(); - int autoBootstrapMasterNodeIndex = - prevMasterCount == 0 && autoManageMinMasterNodes && newMasterCount > 0 && Arrays.stream(extraSettings) - .allMatch(s -> Node.NODE_MASTER_SETTING.get(s) == false - || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s))) + int autoBootstrapMasterNodeIndex = autoManageMasterNodes && prevMasterCount == 0 && newMasterCount > 0 + && Arrays.stream(extraSettings) + .allMatch(s -> Node.NODE_MASTER_SETTING.get(s) == false || ZEN2_DISCOVERY_TYPE.equals(DISCOVERY_TYPE_SETTING.get(s))) ? RandomNumbers.randomIntBetween(random, 0, newMasterCount - 1) : -1; final int numOfNodes = extraSettings.length; final int firstNodeId = nextNodeId.getAndIncrement(); final List settings = new ArrayList<>(); for (int i = 0; i < numOfNodes; i++) { - settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i], defaultMinMasterNodes)); + settings.add(getNodeSettings(firstNodeId + i, random.nextLong(), extraSettings[i])); } nextNodeId.set(firstNodeId + numOfNodes); @@ -1978,7 +1957,7 @@ public synchronized List startNodes(Settings... 
extraSettings) { nodes.add(nodeAndClient); } startAndPublishNodesAndClients(nodes); - if (autoManageMinMasterNodes) { + if (autoManageMasterNodes) { validateClusterFormed(); } return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList()); @@ -2004,11 +1983,6 @@ public List startDataOnlyNodes(int numNodes) { .put(Node.NODE_DATA_SETTING.getKey(), true).build()); } - /** calculates a min master nodes value based on the given number of master nodes */ - private static int getMinMasterNodes(int eligibleMasterNodes) { - return eligibleMasterNodes / 2 + 1; - } - private int getMasterNodesCount() { return (int) nodes.values().stream().filter(n -> Node.NODE_MASTER_SETTING.get(n.node().settings())).count(); } @@ -2143,7 +2117,7 @@ synchronized String routingKeyForShard(Index index, int shard, Random random) { greaterThan(shard)); OperationRouting operationRouting = clusterService.operationRouting(); while (true) { - String routing = RandomStrings.randomAsciiOfLength(random, 10); + String routing = RandomStrings.randomAsciiLettersOfLength(random, 10); final int targetShard = operationRouting .indexShards(clusterService.state(), index.getName(), null, routing) .shardId().getId(); diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java index a2f6b3ed654e6..a018bc1663175 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java +++ b/test/framework/src/test/java/org/elasticsearch/test/disruption/NetworkDisruptionIT.java @@ -37,7 +37,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoMinMasterNodes = false) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class NetworkDisruptionIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { diff --git a/test/logger-usage/build.gradle b/test/logger-usage/build.gradle index 80cb060167114..f949e2a162931 100644 --- a/test/logger-usage/build.gradle +++ b/test/logger-usage/build.gradle @@ -22,7 +22,7 @@ dependencies { compile 'org.ow2.asm:asm-tree:7.1' compile 'org.ow2.asm:asm-analysis:7.1' compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") } loggerUsageCheck.enabled = false diff --git a/x-pack/build.gradle b/x-pack/build.gradle index 9c90bbbbfc269..f9b13f07618ce 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -26,17 +26,3 @@ subprojects { project.ext.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') project.ext.noticeFile = xpackRootProject.file('NOTICE.txt') } - -subprojects { - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ccr:${version}": xpackModule('ccr')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-core:${version}": xpackModule('core')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-deprecation:${version}": xpackModule('deprecation')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-graph:${version}": xpackModule('graph')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ilm:${version}": xpackModule('ilm')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-logstash:${version}": xpackModule('logstash')] - ext.projectSubstitutions += [ 
"org.elasticsearch.plugin:x-pack-ml:${version}": xpackModule('ml')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-monitoring:${version}": xpackModule('monitoring')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-security:${version}": xpackModule('security')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-sql:${version}": xpackModule('sql')] - ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-watcher:${version}": xpackModule('watcher')] -} diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 0a23bb9c9cf62..fe07450bbc10e 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -18,7 +18,6 @@ buildRestTests.expectedUnconvertedCandidates = [ ] dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc index df5ce11c63c14..d7d68ada0e721 100644 --- a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc @@ -103,12 +103,13 @@ xpack.security.authc.realms.oidc.oidc1: rp.client_id: "the_client_id" rp.response_type: code rp.redirect_uri: "https://kibana.example.org:5601/api/security/v1/oidc" + op.issuer: "https://op.example.org" op.authorization_endpoint: "https://op.example.org/oauth2/v1/authorize" op.token_endpoint: "https://op.example.org/oauth2/v1/token" + op.jwkset_path: oidc/jwkset.json op.userinfo_endpoint: "https://op.example.org/oauth2/v1/userinfo" op.endsession_endpoint: "https://op.example.org/oauth2/v1/logout" - op.issuer: "https://op.example.org" - op.jwkset_path: oidc/jwkset.json + rp.post_logout_redirect_uri: "https://kibana.example.org:5601/logged_out" claims.principal: sub claims.groups: "http://example.info/claims/groups" ------------------------------------------------------------------------------------- @@ -146,6 +147,10 @@ rp.redirect_uri:: _exactly_ the same as the one <> and will typically be +$\{kibana-url}/api/security/v1/oidc+ where _$\{kibana-url}_ is the base URL for your {kib} instance +op.issuer:: + A verifiable Identifier for your OpenID Connect Provider. An Issuer Identifier is usually a case sensitive URL. + The value for this setting should be provided by your OpenID Connect Provider. + op.authorization_endpoint:: The URL for the Authorization Endpoint in the OP. This is where the user's browser will be redirected to start the authentication process. The value for this setting should be provided by your @@ -156,6 +161,13 @@ op.token_endpoint:: {es} will send a request to exchange the code for an ID Token, in the case where the Authorization Code flow is used. The value for this setting should be provided by your OpenID Connect Provider. +op.jwkset_path:: + The path to a file or a URL containing a JSON Web Key Set with the key material that the OpenID Connect + Provider uses for signing tokens and claims responses. If a path is set, it is resolved relative to the {es} + config directory. + {es} will automatically monitor this file for changes and will reload the configuration whenever + it is updated. Your OpenID Connect Provider should provide you with this file or a URL where it is available. 
+ op.userinfo_endpoint:: (Optional) The URL for the UserInfo Endpoint in the OpenID Connect Provider. This is the endpoint of the OP that can be queried to get further user information, if required. The value for this setting should be provided by your @@ -166,12 +178,11 @@ op.endsession_endpoint:: browser will be redirected after local logout, if the realm is configured for RP initiated Single Logout and the OP supports it. The value for this setting should be provided by your OpenID Connect Provider. -op.jwkset_path:: - The path to a file containing a JSON Web Key Set with the key material that the OpenID Connect - Provider uses for signing tokens and claims responses. The path is resolved relative to the {es} - config directory. - {es} will automatically monitor this file for changes and will reload the configuration whenever - it is updated. Your OpenID Connect Provider should provide you with this file. +rp.post_logout_redirect_uri:: + (Optional) The Redirect URL where the OpenID Connect Provider should redirect the user after a + successful Single Logout (assuming `op.endsession_endpoint` above is also set). This should be set to a value that + will not trigger a new OpenID Connect Authentication, such as +$\{kibana-url}/logged_out+ where _$\{kibana-url}_ is + the base URL for your {kib} instance. claims.principal:: See <>. claims.groups:: See <>. @@ -306,6 +317,7 @@ realm, as demonstrated in the realm configuration below: [source, yaml] ------------------------------------------------------------------------------------- xpack.security.authc.realms.oidc.oidc1: + order: 2 rp.client_id: "the_client_id" rp.response_type: code rp.redirect_uri: "https://kibana.example.org:5601/api/security/v1/oidc" @@ -369,6 +381,30 @@ will trigger re-authentication of the user. For instance, when using OpenID Connect single sign-on to {kib}, this could be set to +$\{kibana-url}/logged_out+, which will show a user-friendly message to the user. +[[oidc-ssl-config]] +==== OpenID Connect Realm SSL Configuration + +OpenID Connect depends on TLS to provide security properties such as encryption in transit and endpoint authentication. The RP +is required to establish back-channel communication with the OP in order to exchange the code for an ID Token during the +Authorization code grant flow and in order to get additional user information from the UserInfo endpoint. Furthermore, if +you configure `op.jwkset_path` as a URL, {es} will need to get the OP's signing keys from the file hosted there. As such, it is +important that {es} can validate and trust the server certificate that the OP uses for TLS. Since the system truststore is +used for the client context of outgoing https connections, if your OP is using a certificate from a trusted CA, no additional +configuration is needed. + +However, if the issuer of your OP's certificate is not trusted by the JVM on which {es} is running (e.g. it uses an organizational CA), then you must configure +{es} to trust that CA. Assuming that you have the CA certificate that has signed the certificate that the OP uses for TLS +stored in the `/oidc/company-ca.pem` file in the configuration directory of {es}, you need to set the following +property in the realm configuration: + +[source, yaml] +------------------------------------------------------------------------------------- +xpack.security.authc.realms.oidc.oidc1: + order: 1 + ...
+ ssl.certificate_authorities: ["/oidc/company-ca.pem"] +------------------------------------------------------------------------------------- + [[oidc-role-mapping]] === Configuring role mappings diff --git a/x-pack/license-tools/build.gradle b/x-pack/license-tools/build.gradle index 4bd17713a2fea..2a032dc0cd15a 100644 --- a/x-pack/license-tools/build.gradle +++ b/x-pack/license-tools/build.gradle @@ -1,9 +1,9 @@ apply plugin: 'elasticsearch.build' dependencies { - compile "org.elasticsearch.plugin:x-pack-core:${version}" - compile "org.elasticsearch:elasticsearch:${version}" - testCompile "org.elasticsearch.test:framework:${version}" + compile project(':x-pack:plugin:core') + compile project(':server') + testCompile project(':test:framework') } project.forbiddenPatterns { diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index fc5dc839ef05e..a1e9dc52a653f 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -22,9 +22,7 @@ subprojects { // see the root Gradle file for additional logic regarding this configuration project.configurations.create('featureAwarePlugin') project.dependencies.add('featureAwarePlugin', project.configurations.compileClasspath) - project.dependencies.add( - 'featureAwarePlugin', - "org.elasticsearch.xpack.test:feature-aware:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + project.dependencies.add('featureAwarePlugin', project(':x-pack:test:feature-aware')) project.dependencies.add('featureAwarePlugin', project.sourceSets.main.output.getClassesDirs()) final Task featureAwareTask = project.tasks.create("featureAwareCheck", LoggedExec) { diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index 4b3ba9307da28..dfc3b85dfe111 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -48,7 +48,7 @@ gradle.projectsEvaluated { } dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" + compileOnly project(":server") compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java index bb44fd59da5d2..2b85cd11e927f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollectorTests.java @@ -15,9 +15,8 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.AutoFollowStats; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; -import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; +import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.BaseCollectorTestCase; @@ -33,6 +32,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static 
org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -127,7 +127,7 @@ public void testDoCollect() throws Exception { whenClusterStateWithUUID(clusterUuid); final MonitoringDoc.Node node = randomMonitoringNode(random()); - final CcrClient client = mock(CcrClient.class); + final Client client = mock(Client.class); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final List statuses = mockStatuses(); @@ -142,7 +142,7 @@ public void testDoCollect() throws Exception { final ActionFuture future = (ActionFuture) mock(ActionFuture.class); final CcrStatsAction.Response response = new CcrStatsAction.Response(autoFollowStats, statsResponse); - when(client.stats(any())).thenReturn(future); + when(client.execute(eq(CcrStatsAction.INSTANCE), any(CcrStatsAction.Request.class))).thenReturn(future); when(future.actionGet(timeout)).thenReturn(response); final StatsCollector collector = new StatsCollector(settings, clusterService, licenseState, client, threadContext); diff --git a/x-pack/plugin/core/build.gradle b/x-pack/plugin/core/build.gradle index 611e3fe7ade36..3c9f8b740f265 100644 --- a/x-pack/plugin/core/build.gradle +++ b/x-pack/plugin/core/build.gradle @@ -23,7 +23,7 @@ dependencyLicenses { } dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" + compileOnly project(":server") compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" @@ -49,7 +49,7 @@ dependencies { testCompile project(path: ':modules:parent-join', configuration: 'runtime') testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') testCompile project(path: ':modules:analysis-common', configuration: 'runtime') - testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") + testCompile project(":client:rest-high-level") if (isEclipse == false || project.path == ":x-pack:plugin:core-tests") { testCompile(project(':x-pack:license-tools')) { @@ -103,7 +103,7 @@ forbiddenApisMain { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:core") { + if (project.path == ":libs:elasticsearch-core") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java index e56451c4a92ff..368bd4a9b735e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetBasicStatusRequestBuilder.java @@ -10,7 +10,7 @@ class GetBasicStatusRequestBuilder extends ActionRequestBuilder { - GetBasicStatusRequestBuilder(ElasticsearchClient client, GetBasicStatusAction action) { - super(client, action, new GetBasicStatusRequest()); + GetBasicStatusRequestBuilder(ElasticsearchClient client) { + super(client, GetBasicStatusAction.INSTANCE, new GetBasicStatusRequest()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java index d220b476bc578..4102e98f45d68 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetTrialStatusRequestBuilder.java @@ -10,7 +10,7 @@ class GetTrialStatusRequestBuilder extends ActionRequestBuilder { - GetTrialStatusRequestBuilder(ElasticsearchClient client, GetTrialStatusAction action) { - super(client, action, new GetTrialStatusRequest()); + GetTrialStatusRequestBuilder(ElasticsearchClient client) { + super(client, GetTrialStatusAction.INSTANCE, new GetTrialStatusRequest()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java index 80d1fb68f0e2a..4dcfa94f6e4d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java @@ -45,11 +45,11 @@ public void deleteLicense(DeleteLicenseRequest request, ActionListener listener) { @@ -61,10 +61,10 @@ public void postStartBasic(PostStartBasicRequest request, ActionListener { - PostStartBasicRequestBuilder(ElasticsearchClient client, PostStartBasicAction action) { - super(client, action, new PostStartBasicRequest()); + PostStartBasicRequestBuilder(ElasticsearchClient client) { + super(client, PostStartBasicAction.INSTANCE, new PostStartBasicRequest()); } public PostStartBasicRequestBuilder setAcknowledge(boolean acknowledge) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java index 8e12c879f997d..93a6207d519d1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java @@ -10,8 +10,8 @@ class PostStartTrialRequestBuilder extends ActionRequestBuilder { - PostStartTrialRequestBuilder(ElasticsearchClient client, PostStartTrialAction action) { - super(client, action, new PostStartTrialRequest()); + PostStartTrialRequestBuilder(ElasticsearchClient client) { + super(client, PostStartTrialAction.INSTANCE, new PostStartTrialRequest()); } public PostStartTrialRequestBuilder setAcknowledge(boolean acknowledge) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java index 5383726adc318..56c1898ac793f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java @@ -7,20 +7,20 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.DELETE; -public class 
RestDeleteLicenseAction extends XPackRestHandler { +public class RestDeleteLicenseAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestDeleteLicenseAction.class)); @@ -29,7 +29,7 @@ public class RestDeleteLicenseAction extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( DELETE, "/_license", this, - DELETE, URI_BASE + "/license", deprecationLogger); + DELETE, "/_xpack/license", deprecationLogger); } @Override @@ -38,12 +38,12 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteLicenseRequest deleteLicenseRequest = new DeleteLicenseRequest(); deleteLicenseRequest.timeout(request.paramAsTime("timeout", deleteLicenseRequest.timeout())); deleteLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteLicenseRequest.masterNodeTimeout())); - return channel -> client.es().admin().cluster().execute(DeleteLicenseAction.INSTANCE, deleteLicenseRequest, + return channel -> client.admin().cluster().execute(DeleteLicenseAction.INSTANCE, deleteLicenseRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java index 0195b350b050c..3e61c3978bcbc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetBasicStatus.java @@ -7,17 +7,17 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class RestGetBasicStatus extends XPackRestHandler { +public class RestGetBasicStatus extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetBasicStatus.class)); @@ -26,12 +26,12 @@ public class RestGetBasicStatus extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/_license/basic_status", this, - GET, URI_BASE + "/license/basic_status", deprecationLogger); + GET, "/_xpack/license/basic_status", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { - return channel -> client.licensing().prepareGetStartBasic().execute(new RestToXContentListener<>(channel)); + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + return channel -> new GetBasicStatusRequestBuilder(client).execute(new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java index 02809ae974cd7..4c1102208c840 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetLicenseAction.java @@ -7,18 +7,18 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.protocol.xpack.license.GetLicenseRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.HashMap; @@ -28,7 +28,7 @@ import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; -public class RestGetLicenseAction extends XPackRestHandler { +public class RestGetLicenseAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetLicenseAction.class)); @@ -37,7 +37,7 @@ public class RestGetLicenseAction extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/_license", this, - GET, URI_BASE + "/license", deprecationLogger); + GET, "/_xpack/license", deprecationLogger); } @Override @@ -52,15 +52,15 @@ public String getName() { * The licenses are sorted by latest issue_date */ @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final Map overrideParams = new HashMap<>(2); overrideParams.put(License.REST_VIEW_MODE, "true"); overrideParams.put(License.LICENSE_VERSION_MODE, String.valueOf(License.VERSION_CURRENT)); final ToXContent.Params params = new ToXContent.DelegatingMapParams(overrideParams, request); GetLicenseRequest getLicenseRequest = new GetLicenseRequest(); getLicenseRequest.local(request.paramAsBoolean("local", getLicenseRequest.local())); - return channel -> client.es().admin().cluster().execute(GetLicenseAction.INSTANCE, getLicenseRequest, - new RestBuilderListener(channel) { + return channel -> client.admin().cluster().execute(GetLicenseAction.INSTANCE, getLicenseRequest, + new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(GetLicenseResponse response, XContentBuilder builder) throws Exception { // Default to pretty printing, but allow ?pretty=false to disable diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java index 20366328e5031..2ee79da977357 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java @@ -7,17 +7,17 @@ package 
org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import static org.elasticsearch.rest.RestRequest.Method.GET; -public class RestGetTrialStatus extends XPackRestHandler { +public class RestGetTrialStatus extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGetTrialStatus.class)); @@ -26,12 +26,12 @@ public class RestGetTrialStatus extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/_license/trial_status", this, - GET, URI_BASE + "/license/trial_status", deprecationLogger); + GET, "/_xpack/license/trial_status", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { - return channel -> client.licensing().prepareGetStartTrial().execute(new RestToXContentListener<>(channel)); + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + return channel -> new GetTrialStatusRequestBuilder(client).execute(new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java index 79e8849669c8f..77f09383b407d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java @@ -7,19 +7,19 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestStatusToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -public class RestPostStartBasicLicense extends XPackRestHandler { +public class RestPostStartBasicLicense extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestPostStartBasicLicense.class)); @@ -28,16 +28,16 @@ public class RestPostStartBasicLicense extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/_license/start_basic", this, - POST, URI_BASE + "/license/start_basic", deprecationLogger); + POST, "/_xpack/license/start_basic", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { PostStartBasicRequest 
startBasicRequest = new PostStartBasicRequest(); startBasicRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); startBasicRequest.timeout(request.paramAsTime("timeout", startBasicRequest.timeout())); startBasicRequest.masterNodeTimeout(request.paramAsTime("master_timeout", startBasicRequest.masterNodeTimeout())); - return channel -> client.licensing().postStartBasic(startBasicRequest, new RestStatusToXContentListener<>(channel)); + return channel -> client.execute(PostStartBasicAction.INSTANCE, startBasicRequest, new RestStatusToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java index a263d0d82c26a..d2ce0776305da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java @@ -7,23 +7,23 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; -public class RestPostStartTrialLicense extends XPackRestHandler { +public class RestPostStartTrialLicense extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestPostStartTrialLicense.class)); @@ -32,16 +32,16 @@ public class RestPostStartTrialLicense extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/_license/start_trial", this, - POST, URI_BASE + "/license/start_trial", deprecationLogger); + POST, "/_xpack/license/start_trial", deprecationLogger); } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { PostStartTrialRequest startTrialRequest = new PostStartTrialRequest(); startTrialRequest.setType(request.param("type", "trial")); startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); - return channel -> client.licensing().postStartTrial(startTrialRequest, - new RestBuilderListener(channel) { + return channel -> client.execute(PostStartTrialAction.INSTANCE, startTrialRequest, + new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception { PostStartTrialResponse.Status status = response.getStatus(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java index 986dacb687738..698ec440a6cbe 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java @@ -7,20 +7,20 @@ package org.elasticsearch.license; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; -public class RestPutLicenseAction extends XPackRestHandler { +public class RestPutLicenseAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestPutLicenseAction.class)); @@ -30,11 +30,11 @@ public class RestPutLicenseAction extends XPackRestHandler { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/_license", this, - POST, URI_BASE + "/license", deprecationLogger); + POST, "/_xpack/license", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( PUT, "/_license", this, - PUT, URI_BASE + "/license", deprecationLogger); + PUT, "/_xpack/license", deprecationLogger); } @Override @@ -43,7 +43,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { if (request.hasContent() == false) { throw new IllegalArgumentException("The license must be provided in the request body"); } @@ -58,8 +58,7 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa "/_license/start_basic API to install a basic license that does not expire."); } - return channel -> client.es().admin().cluster().execute(PutLicenseAction.INSTANCE, putLicenseRequest, - new RestToXContentListener<>(channel)); + return channel -> client.execute(PutLicenseAction.INSTANCE, putLicenseRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java deleted file mode 100644 index acf97b63684ae..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
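The recurring change across these license endpoints is that each handler now extends `BaseRestHandler` directly and drives the transport action through the `NodeClient`, instead of going through the `XPackRestHandler`/`XPackClient` wrappers removed below. A minimal sketch of the resulting handler shape, using an invented class name and route but the same building blocks as the handlers above:

[source, java]
-------------------------------------------------------------------------------------
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.GetBasicStatusAction;
import org.elasticsearch.license.GetBasicStatusRequest;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;

import static org.elasticsearch.rest.RestRequest.Method.GET;

public class RestExampleStatusAction extends BaseRestHandler {

    public RestExampleStatusAction(Settings settings, RestController controller) {
        super(settings);
        // hypothetical route, registered the same way as the real handlers in this change
        controller.registerHandler(GET, "/_license/example_status", this);
    }

    @Override
    public String getName() {
        return "example_license_status_action";
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
        // execute the transport action directly on the node client; no XPackClient wrapper involved
        return channel -> client.execute(GetBasicStatusAction.INSTANCE, new GetBasicStatusRequest(),
            new RestToXContentListener<>(channel));
    }
}
-------------------------------------------------------------------------------------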
- */ -package org.elasticsearch.xpack.core; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.license.LicensingClient; -import org.elasticsearch.protocol.xpack.XPackInfoRequest; -import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeRequest; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeResponse; -import org.elasticsearch.xpack.core.action.XPackInfoAction; -import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; -import org.elasticsearch.xpack.core.indexlifecycle.client.ILMClient; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; -import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; -import org.elasticsearch.xpack.core.watcher.client.WatcherClient; - -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; - -public class XPackClient { - - private final Client client; - - private final CcrClient ccrClient; - private final LicensingClient licensingClient; - private final MonitoringClient monitoringClient; - private final WatcherClient watcherClient; - private final MachineLearningClient machineLearning; - private final ILMClient ilmClient; - - public XPackClient(Client client) { - this.client = Objects.requireNonNull(client, "client"); - this.ccrClient = new CcrClient(client); - this.licensingClient = new LicensingClient(client); - this.monitoringClient = new MonitoringClient(client); - this.watcherClient = new WatcherClient(client); - this.machineLearning = new MachineLearningClient(client); - this.ilmClient = new ILMClient(client); - } - - public Client es() { - return client; - } - - public CcrClient ccr() { - return ccrClient; - } - - public LicensingClient licensing() { - return licensingClient; - } - - public MonitoringClient monitoring() { - return monitoringClient; - } - - public WatcherClient watcher() { - return watcherClient; - } - - public MachineLearningClient machineLearning() { - return machineLearning; - } - - public ILMClient ilmClient() { - return ilmClient; - } - - public XPackClient withHeaders(Map headers) { - return new XPackClient(client.filterWithHeader(headers)); - } - - /** - * Returns a client that will call xpack APIs on behalf of the given user. - * - * @param username The username of the user - * @param passwd The password of the user. This char array can be cleared after calling this method. 
- */ - public XPackClient withAuth(String username, char[] passwd) { - return withHeaders(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(username, new SecureString(passwd)))); - } - - public XPackInfoRequestBuilder prepareInfo() { - return new XPackInfoRequestBuilder(client); - } - - public void info(XPackInfoRequest request, ActionListener listener) { - client.execute(XPackInfoAction.INSTANCE, request, listener); - } - - /** - * Freezes or unfreeze one or more indices - */ - public void freeze(FreezeRequest request, ActionListener listener) { - client.execute(FreezeIndexAction.INSTANCE, request, listener); - } - - /** - * Freeze or unfreeze one or more indices - */ - public FreezeResponse freeze(FreezeRequest request) - throws ExecutionException, InterruptedException { - PlainActionFuture future = new PlainActionFuture<>(); - freeze(request, future); - return future.get(); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index 0c4477b6b700e..efe57f44e89dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -90,28 +90,21 @@ public synchronized IndexerState start() { * Sets the internal state to {@link IndexerState#STOPPING} if an async job is * running in the background, {@link #onStop()} will be called when the background job * detects that the indexer is stopped. - * If there is no job running when this function is called - * the state is set to {@link IndexerState#STOPPED} and {@link #onStop()} called directly. + * If there is no job running when this function is called the returned + * state is {@link IndexerState#STOPPED} and {@link #onStop()} will not be called. * * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). 
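In the `finishAndSetState()` hunks just below, the `onStop()`/`onAbort()` callbacks are moved out of the `updateAndGet` lambda (which may be re-run under contention) and invoked once after the state transition has been applied. A stripped-down, hypothetical sketch of that pattern, independent of the indexer's real fields:

[source, java]
-------------------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

class CallbackAfterTransitionSketch {

    enum State { STARTED, INDEXING, STOPPING, STOPPED }

    private final AtomicReference<State> state = new AtomicReference<>(State.INDEXING);

    State finish(Runnable onStop) {
        AtomicBoolean callOnStop = new AtomicBoolean(false);
        State updated = state.updateAndGet(prev -> {
            callOnStop.set(false); // the lambda may run more than once, so reset the flag first
            if (prev == State.STOPPING) {
                callOnStop.set(true);
                return State.STOPPED;
            }
            return prev == State.INDEXING ? State.STARTED : prev;
        });
        if (callOnStop.get()) {
            onStop.run(); // the side effect happens exactly once, outside the atomic update
        }
        return updated;
    }
}
-------------------------------------------------------------------------------------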
*/ public synchronized IndexerState stop() { - AtomicBoolean wasStartedAndSetStopped = new AtomicBoolean(false); - IndexerState currentState = state.updateAndGet(previousState -> { + return state.updateAndGet(previousState -> { if (previousState == IndexerState.INDEXING) { return IndexerState.STOPPING; } else if (previousState == IndexerState.STARTED) { - wasStartedAndSetStopped.set(true); return IndexerState.STOPPED; } else { return previousState; } }); - - if (wasStartedAndSetStopped.get()) { - onStop(); - } - return currentState; } /** @@ -288,20 +281,24 @@ private void finishWithIndexingFailure(Exception exc) { } private IndexerState finishAndSetState() { - return state.updateAndGet(prev -> { + AtomicBoolean callOnStop = new AtomicBoolean(false); + AtomicBoolean callOnAbort = new AtomicBoolean(false); + IndexerState updatedState = state.updateAndGet(prev -> { + callOnAbort.set(false); + callOnStop.set(false); switch (prev) { case INDEXING: // ready for another job return IndexerState.STARTED; case STOPPING: + callOnStop.set(true); // must be started again - onStop(); return IndexerState.STOPPED; case ABORTING: + callOnAbort.set(true); // abort and exit - onAbort(); return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first case STOPPED: @@ -316,6 +313,14 @@ private IndexerState finishAndSetState() { throw new IllegalStateException("Indexer job encountered an illegal state [" + prev + "]"); } }); + + if (callOnStop.get()) { + onStop(); + } else if (callOnAbort.get()) { + onAbort(); + } + + return updatedState; } private void onSearchResponse(SearchResponse searchResponse) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java index f4eb7fbc4e53b..5961a2305eaa1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureAction.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -113,6 +114,7 @@ public boolean equals(Object other) { public static class Request extends ActionRequest { public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample"); + public static final ParseField LINE_MERGE_SIZE_LIMIT = new ParseField("line_merge_size_limit"); public static final ParseField TIMEOUT = new ParseField("timeout"); public static final ParseField CHARSET = FileStructure.CHARSET; public static final ParseField FORMAT = FileStructure.FORMAT; @@ -130,6 +132,7 @@ public static class Request extends ActionRequest { "[%s] may only be specified if [" + FORMAT.getPreferredName() + "] is [%s]"; private Integer linesToSample; + private Integer lineMergeSizeLimit; private TimeValue timeout; private String charset; private FileStructure.Format format; @@ -154,6 +157,14 @@ public void setLinesToSample(Integer linesToSample) { this.linesToSample = linesToSample; } + public Integer getLineMergeSizeLimit() { + return lineMergeSizeLimit; + } + + public void setLineMergeSizeLimit(Integer lineMergeSizeLimit) { + this.lineMergeSizeLimit = lineMergeSizeLimit; + } + public TimeValue getTimeout() { return timeout; } @@ -291,6 +302,10 @@ public 
ActionRequestValidationException validate() { validationException = addValidationError("[" + LINES_TO_SAMPLE.getPreferredName() + "] must be positive if specified", validationException); } + if (lineMergeSizeLimit != null && lineMergeSizeLimit <= 0) { + validationException = addValidationError("[" + LINE_MERGE_SIZE_LIMIT.getPreferredName() + "] must be positive if specified", + validationException); + } if (format != FileStructure.Format.DELIMITED) { if (columnNames != null) { validationException = addIncompatibleArgError(COLUMN_NAMES, FileStructure.Format.DELIMITED, validationException); @@ -324,6 +339,9 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); linesToSample = in.readOptionalVInt(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + lineMergeSizeLimit = in.readOptionalVInt(); + } timeout = in.readOptionalTimeValue(); charset = in.readOptionalString(); format = in.readBoolean() ? in.readEnum(FileStructure.Format.class) : null; @@ -342,6 +360,9 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalVInt(linesToSample); + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeOptionalVInt(lineMergeSizeLimit); + } out.writeOptionalTimeValue(timeout); out.writeOptionalString(charset); if (format == null) { @@ -378,8 +399,8 @@ public void writeTo(StreamOutput out) throws IOException { @Override public int hashCode() { - return Objects.hash(linesToSample, timeout, charset, format, columnNames, hasHeaderRow, delimiter, grokPattern, timestampFormat, - timestampField, sample); + return Objects.hash(linesToSample, lineMergeSizeLimit, timeout, charset, format, columnNames, hasHeaderRow, delimiter, + grokPattern, timestampFormat, timestampField, sample); } @Override @@ -395,6 +416,7 @@ public boolean equals(Object other) { Request that = (Request) other; return Objects.equals(this.linesToSample, that.linesToSample) && + Objects.equals(this.lineMergeSizeLimit, that.lineMergeSizeLimit) && Objects.equals(this.timeout, that.timeout) && Objects.equals(this.charset, that.charset) && Objects.equals(this.format, that.format) && diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java deleted file mode 100644 index 2dba6e6a4664f..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/client/MonitoringClient.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
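The `FindFileStructureAction.Request` hunks above introduce `lineMergeSizeLimit` behind a version check on both the read and the write path, so that nodes in a mixed-version cluster neither send nor expect the new field (the diff gates on `Version.CURRENT`, which is typically swapped for the real introducing version once the change is backported). A self-contained, hypothetical fragment of that pattern:

[source, java]
-------------------------------------------------------------------------------------
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Invented request fragment illustrating the version-gated optional field pattern.
class ExampleRequestFragment implements Writeable {

    private Integer linesToSample;
    private Integer lineMergeSizeLimit; // the newly added optional field

    ExampleRequestFragment(StreamInput in) throws IOException {
        linesToSample = in.readOptionalVInt();
        if (in.getVersion().onOrAfter(Version.CURRENT)) { // placeholder version, as in the diff
            lineMergeSizeLimit = in.readOptionalVInt();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalVInt(linesToSample);
        if (out.getVersion().onOrAfter(Version.CURRENT)) { // must mirror the read side exactly
            out.writeOptionalVInt(lineMergeSizeLimit);
        }
    }
}
-------------------------------------------------------------------------------------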
- */ -package org.elasticsearch.xpack.core.monitoring.client; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequest; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; - -import java.util.Map; - -public class MonitoringClient { - - private final Client client; - - @Inject - public MonitoringClient(Client client) { - this.client = client; - } - - - /** - * Creates a request builder that bulk index monitoring documents. - * - * @return The request builder - */ - public MonitoringBulkRequestBuilder prepareMonitoringBulk() { - return new MonitoringBulkRequestBuilder(client); - } - - /** - * Executes a bulk of index operations that concern monitoring documents. - * - * @param request The monitoring bulk request - * @param listener A listener to be notified with a result - */ - public void bulk(MonitoringBulkRequest request, ActionListener listener) { - client.execute(MonitoringBulkAction.INSTANCE, request, listener); - } - - /** - * Executes a bulk of index operations that concern monitoring documents. - * - * @param request The monitoring bulk request - */ - public ActionFuture bulk(MonitoringBulkRequest request) { - return client.execute(MonitoringBulkAction.INSTANCE, request); - } - - public MonitoringClient filterWithHeader(Map headers) { - return new MonitoringClient(client.filterWithHeader(headers)); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java deleted file mode 100644 index 5ac0969624bc8..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/XPackRestHandler.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.rest; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.core.XPackClient; - -import java.io.IOException; - -public abstract class XPackRestHandler extends BaseRestHandler { - - protected static String URI_BASE = "/_xpack"; - - public XPackRestHandler(Settings settings) { - super(settings); - } - - @Override - public final RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - return doPrepareRequest(request, new XPackClient(client)); - } - - protected abstract RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException; -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java index 9604cdd8b3183..580acbae0dc87 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java @@ -7,16 +7,17 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; -public final class RestFreezeIndexAction extends XPackRestHandler { +public final class RestFreezeIndexAction extends BaseRestHandler { public RestFreezeIndexAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(RestRequest.Method.POST, "/{index}/_freeze", this); @@ -24,7 +25,7 @@ public RestFreezeIndexAction(Settings settings, RestController controller) { } @Override - protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { boolean freeze = request.path().endsWith("/_freeze"); TransportFreezeIndexAction.FreezeRequest freezeRequest = new TransportFreezeIndexAction.FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); @@ -36,7 +37,7 @@ protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient freezeRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); } freezeRequest.setFreeze(freeze); - return channel -> client.freeze(freezeRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(FreezeIndexAction.INSTANCE, freezeRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java index c057c04cc637d..2a41be1cb0cb1 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackInfoAction.java @@ -5,13 +5,14 @@ */ package org.elasticsearch.xpack.core.rest.action; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; +import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; import java.io.IOException; import java.util.EnumSet; @@ -19,11 +20,11 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -public class RestXPackInfoAction extends XPackRestHandler { +public class RestXPackInfoAction extends BaseRestHandler { public RestXPackInfoAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(HEAD, URI_BASE, this); - controller.registerHandler(GET, URI_BASE, this); + controller.registerHandler(HEAD, "/_xpack", this); + controller.registerHandler(GET, "/_xpack", this); } @Override @@ -32,7 +33,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { // we piggyback verbosity on "human" output boolean verbose = request.paramAsBoolean("human", true); @@ -40,7 +41,7 @@ public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient cli EnumSet categories = XPackInfoRequest.Category .toSet(request.paramAsStringArray("categories", new String[] { "_all" })); return channel -> - client.prepareInfo() + new XPackInfoRequestBuilder(client) .setVerbose(verbose) .setCategories(categories) .execute(new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index 0f09f17dbb066..44b04c3548f7b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -6,29 +6,29 @@ package org.elasticsearch.xpack.core.rest.action; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; import org.elasticsearch.xpack.core.action.XPackUsageResponse; -import 
org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; -public class RestXPackUsageAction extends XPackRestHandler { +public class RestXPackUsageAction extends BaseRestHandler { public RestXPackUsageAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(GET, URI_BASE + "/usage", this); + controller.registerHandler(GET, "/_xpack/usage", this); } @Override @@ -37,11 +37,11 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final TimeValue masterTimeout = request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT); - return channel -> new XPackUsageRequestBuilder(client.es()) + return channel -> new XPackUsageRequestBuilder(client) .setMasterNodeTimeout(masterTimeout) - .execute(new RestBuilderListener(channel) { + .execute(new RestBuilderListener<>(channel) { @Override public RestResponse buildResponse(XPackUsageResponse response, XContentBuilder builder) throws Exception { builder.startObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 49d4159f13968..ab06fc32e288f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -180,14 +180,22 @@ private static Map initializeReservedRoles() { RoleDescriptor.IndicesPrivileges.builder() .indices(".data-frame-notifications*") .privileges("view_index_metadata", "read").build() - }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_ml").build() + }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("data_frame_transforms_user", new RoleDescriptor("data_frame_transforms_user", new String[] { "monitor_data_frame_transforms" }, new RoleDescriptor.IndicesPrivileges[]{ RoleDescriptor.IndicesPrivileges.builder() .indices(".data-frame-notifications*") .privileges("view_index_metadata", "read").build() - }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + }, + new RoleDescriptor.ApplicationResourcePrivileges[] { + RoleDescriptor.ApplicationResourcePrivileges.builder() + .application("kibana-*").resources("*").privileges("reserved_ml").build() + }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("watcher_admin", new RoleDescriptor("watcher_admin", new String[] { "manage_watcher" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index d0a086bd649f0..1d57df3b54199 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; @@ -222,7 +221,7 @@ public void onFailure(Exception e) { creationCheck.set(false); onPutPolicyFailure(policy, e); } - }, (req, listener) -> new XPackClient(client).ilmClient().putLifecyclePolicy(req, listener)); + }, (req, listener) -> client.execute(PutLifecycleAction.INSTANCE, req, listener)); }); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 9231bad9a8dfe..39407ef735974 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.index.engine; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -23,6 +24,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -37,9 +39,9 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchLocalRequest; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; import org.hamcrest.Matchers; import java.io.IOException; @@ -69,8 +71,7 @@ public void testCloseFreezeAndOpen() throws ExecutionException, InterruptedExcep client().prepareIndex("index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); expectThrows(ClusterBlockException.class, () -> client().prepareIndex("index", "_doc", "4").setSource("field", "value") .setRefreshPolicy(IMMEDIATE).get()); IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -115,8 +116,7 @@ public void testSearchAndGetAPIsAreThrottled() throws InterruptedException, 
IOEx for (int i = 0; i < 10; i++) { client().prepareIndex("index", "_doc", "" + i).setSource("field", "foo bar baz").get(); } - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); int numRequests = randomIntBetween(20, 50); CountDownLatch latch = new CountDownLatch(numRequests); ActionListener listener = ActionListener.wrap(latch::countDown); @@ -159,8 +159,7 @@ public void testFreezeAndUnfreeze() throws InterruptedException, ExecutionExcept // sometimes close it assertAcked(client().admin().indices().prepareClose("index").get()); } - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -169,7 +168,8 @@ public void testFreezeAndUnfreeze() throws InterruptedException, ExecutionExcept IndexShard shard = indexService.getShard(0); assertEquals(0, shard.refreshStats().getTotal()); } - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index").setFreeze(false))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, + new TransportFreezeIndexAction.FreezeRequest("index").setFreeze(false)).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -192,13 +192,12 @@ private void assertIndexFrozen(String idx) { public void testDoubleFreeze() throws ExecutionException, InterruptedException { createIndex("test-idx", Settings.builder().put("index.number_of_shards", 2).build()); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx"))); - ExecutionException executionException = expectThrows(ExecutionException.class, - () -> xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx") + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx")).actionGet()); + ResourceNotFoundException exception = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx") .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), - EnumSet.of(IndicesOptions.WildcardStates.OPEN))))); - assertEquals("no index found to freeze", executionException.getCause().getMessage()); + EnumSet.of(IndicesOptions.WildcardStates.OPEN)))).actionGet()); + assertEquals("no index found to freeze", exception.getMessage()); } public void testUnfreezeClosedIndices() throws ExecutionException, InterruptedException { @@ -206,11 +205,10 @@ public void testUnfreezeClosedIndices() throws ExecutionException, InterruptedEx client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("idx-closed", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("idx-closed", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new 
TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); assertAcked(client().admin().indices().prepareClose("idx-closed").get()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx*").setFreeze(false) - .indicesOptions(IndicesOptions.strictExpand()))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx*").setFreeze(false) + .indicesOptions(IndicesOptions.strictExpand())).actionGet()); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); assertEquals(IndexMetaData.State.CLOSE, stateResponse.getState().getMetaData().index("idx-closed").getState()); assertEquals(IndexMetaData.State.OPEN, stateResponse.getState().getMetaData().index("idx").getState()); @@ -222,8 +220,7 @@ public void testFreezePattern() throws ExecutionException, InterruptedException client().prepareIndex("test-idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); createIndex("test-idx-1", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("test-idx-1", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test-idx")).actionGet()); assertIndexFrozen("test-idx"); IndicesStatsResponse index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); @@ -232,7 +229,7 @@ public void testFreezePattern() throws ExecutionException, InterruptedException index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); assertEquals(1, index.getTotal().refresh.getTotal()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test*"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("test*")).actionGet()); assertIndexFrozen("test-idx"); assertIndexFrozen("test-idx-1"); index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); @@ -269,8 +266,7 @@ public void testCanMatch() throws ExecutionException, InterruptedException, IOEx new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null))); } - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("index")).actionGet()); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -301,8 +297,7 @@ public void testCanMatch() throws ExecutionException, InterruptedException, IOEx public void testWriteToFrozenIndex() throws ExecutionException, InterruptedException { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); assertIndexFrozen("idx"); 
expectThrows(ClusterBlockException.class, () -> client().prepareIndex("idx", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get()); @@ -312,9 +307,8 @@ public void testIgnoreUnavailable() throws ExecutionException, InterruptedExcept createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); createIndex("idx-close", Settings.builder().put("index.number_of_shards", 1).build()); assertAcked(client().admin().indices().prepareClose("idx-close")); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx*", "not_available") - .indicesOptions(IndicesOptions.fromParameters(null, "true", null, null, IndicesOptions.strictExpandOpen())))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx*", "not_available") + .indicesOptions(IndicesOptions.fromParameters(null, "true", null, null, IndicesOptions.strictExpandOpen()))).actionGet()); assertIndexFrozen("idx"); assertEquals(IndexMetaData.State.CLOSE, client().admin().cluster().prepareState().get().getState().metaData().index("idx-close").getState()); @@ -322,17 +316,17 @@ public void testIgnoreUnavailable() throws ExecutionException, InterruptedExcept public void testUnfreezeClosedIndex() throws ExecutionException, InterruptedException { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("idx")).actionGet()); assertAcked(client().admin().indices().prepareClose("idx")); assertEquals(IndexMetaData.State.CLOSE, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); - expectThrows(ExecutionException.class, - () -> xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("id*").setFreeze(false) + expectThrows(IndexNotFoundException.class, + () -> client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest("id*").setFreeze(false) .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), - EnumSet.of(IndicesOptions.WildcardStates.OPEN))))); + EnumSet.of(IndicesOptions.WildcardStates.OPEN)))).actionGet()); // we don't resolve to closed indices - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx").setFreeze(false))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, + new TransportFreezeIndexAction.FreezeRequest("idx").setFreeze(false)).actionGet()); assertEquals(IndexMetaData.State.OPEN, client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); } @@ -345,8 +339,7 @@ public void testFreezeIndexIncreasesIndexSettingsVersion() throws ExecutionExcep final long settingsVersion = client().admin().cluster().prepareState().get() .getState().metaData().index(index).getSettingsVersion(); - XPackClient xPackClient = new XPackClient(client()); - assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest(index))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(index)).actionGet()); assertIndexFrozen(index); assertThat(client().admin().cluster().prepareState().get().getState().metaData().index(index).getSettingsVersion(), greaterThan(settingsVersion)); @@ -374,7 +367,7 @@ public 
void testFreezeEmptyIndexWithTranslogOps() throws Exception { assertThat(indexService.getShard(0).getGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); }); - assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); } @@ -390,7 +383,7 @@ public void testRecoveryState() throws ExecutionException, InterruptedException assertThat(indexResponse.status(), is(RestStatus.CREATED)); } - assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); + assertAcked(client().execute(FreezeIndexAction.INSTANCE, new TransportFreezeIndexAction.FreezeRequest(indexName)).actionGet()); assertIndexFrozen(indexName); final IndexMetaData indexMetaData = client().admin().cluster().prepareState().get().getState().metaData().index(indexName); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index fc86a9554880f..053e41d9b2a63 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -268,25 +268,6 @@ public void testStateMachineBrokenSearch() throws InterruptedException { } } - public void testStop_AfterIndexerIsFinished() throws InterruptedException { - AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); - final ExecutorService executor = Executors.newFixedThreadPool(1); - try { - CountDownLatch countDownLatch = new CountDownLatch(1); - MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, false); - indexer.start(); - assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - countDownLatch.countDown(); - assertTrue(awaitBusy(() -> isFinished.get())); - - indexer.stop(); - assertTrue(isStopped.get()); - assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); - } finally { - executor.shutdownNow(); - } - } - public void testStop_WhileIndexing() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java index ba69e6f750c96..e72b054015885 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FindFileStructureActionRequestTests.java @@ -26,6 +26,10 @@ protected FindFileStructureAction.Request createTestInstance() { request.setLinesToSample(randomIntBetween(10, 2000)); } + if (randomBoolean()) { + request.setLineMergeSizeLimit(randomIntBetween(1000, 20000)); + } + if (randomBoolean()) { request.setCharset(randomAlphaOfLength(10)); } @@ -85,6 +89,18 @@ public void testValidateLinesToSample() { assertThat(e.getMessage(), containsString(" [lines_to_sample] must be positive if specified")); } + public void testValidateLineMergeSizeLimit() { + + FindFileStructureAction.Request request = new 
FindFileStructureAction.Request(); + request.setLineMergeSizeLimit(randomIntBetween(-1, 0)); + request.setSample(new BytesArray("foo\n")); + + ActionRequestValidationException e = request.validate(); + assertNotNull(e); + assertThat(e.getMessage(), startsWith("Validation Failed: ")); + assertThat(e.getMessage(), containsString(" [line_merge_size_limit] must be positive if specified")); + } + public void testValidateNonDelimited() { FindFileStructureAction.Request request = new FindFileStructureAction.Request(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 78f9623f4fbb8..bf2c08a913821 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1096,6 +1096,18 @@ public void testDataFrameTransformsAdminRole() { assertNoAccessAllowed(role, ".data-frame-internal-1"); // internal use only assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false)); } public void testDataFrameTransformsUserRole() { @@ -1120,6 +1132,18 @@ public void testDataFrameTransformsUserRole() { assertNoAccessAllowed(role, ".data-frame-internal-1"); assertNoAccessAllowed(role, RestrictedIndicesNames.RESTRICTED_NAMES); + + final String kibanaApplicationWithRandomIndex = "kibana-" + randomFrom(randomAlphaOfLengthBetween(8, 24), ".kibana"); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(kibanaApplicationWithRandomIndex, "app-reserved_ml", "reserved_ml"), "*"), is(true)); + + final String otherApplication = "logstash-" + randomAlphaOfLengthBetween(8, 24); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-foo", "foo"), "*"), is(false)); + assertThat(role.application().grants( + new ApplicationPrivilege(otherApplication, "app-reserved_ml", "reserved_ml"), "*"), is(false)); } public void testWatcherAdminRole() { diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/data-frame/build.gradle index 1e939b2ceb949..d822e3a61896d 100644 --- a/x-pack/plugin/data-frame/build.gradle +++ b/x-pack/plugin/data-frame/build.gradle @@ -9,7 +9,7 @@ esplugin { } dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" + compileOnly project(":server") compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 
'testArtifacts') diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java index 1ec425c641693..69fb980871dce 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java @@ -30,7 +30,6 @@ public void cleanTransforms() throws IOException { cleanUp(); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public void testDataFrameTransformCrud() throws Exception { createReviewsIndex(); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 7dc79c1ae8fbe..9884c9bb6793b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import org.junit.Before; @@ -23,7 +22,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameAuditorIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index d7e12cf2bee4d..681599331c8af 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -8,7 +8,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -23,7 +22,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { /** diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index 9bac6ca0b4049..d9927cd09ed8f 100644 --- 
a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_user"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java index 5b95d1daead53..26a957ea055c2 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -16,7 +15,6 @@ import java.io.IOException; import java.util.Map; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameMetaDataIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 3c661a0f4aca4..933fcc6c8e5c4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.junit.Before; @@ -22,7 +21,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFramePivotRestIT extends DataFrameRestTestCase { private static final String TEST_USER_NAME = "df_admin_plus_data"; @@ -251,10 +249,10 @@ public void testPreviewTransform() throws Exception { config += " \"pivot\": {" + " \"group_by\": {" - + " \"reviewer\": {\"terms\": { \"field\": \"user_id\" }}," + + " \"user.id\": {\"terms\": { \"field\": \"user_id\" }}," + " \"by_day\": {\"date_histogram\": {\"fixed_interval\": 
\"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-dd\"}}}," + " \"aggregations\": {" - + " \"avg_rating\": {" + + " \"user.avg_rating\": {" + " \"avg\": {" + " \"field\": \"stars\"" + " } } } }" @@ -265,10 +263,14 @@ public void testPreviewTransform() throws Exception { List> preview = (List>)previewDataframeResponse.get("preview"); // preview is limited to 100 assertThat(preview.size(), equalTo(100)); - Set expectedFields = new HashSet<>(Arrays.asList("reviewer", "by_day", "avg_rating")); + Set expectedTopLevelFields = new HashSet<>(Arrays.asList("user", "by_day")); + Set expectedNestedFields = new HashSet<>(Arrays.asList("id", "avg_rating")); preview.forEach(p -> { Set keys = p.keySet(); - assertThat(keys, equalTo(expectedFields)); + assertThat(keys, equalTo(expectedTopLevelFields)); + Map nestedObj = (Map)p.get("user"); + keys = nestedObj.keySet(); + assertThat(keys, equalTo(expectedNestedFields)); }); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 7b63644dd34ad..96aeeda8755f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; @@ -20,7 +19,6 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { public void testDummy() { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index f98fa6a271365..4f209c5a9f3f4 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.dataframe.integration; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -23,7 +22,6 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; -@LuceneTestCase.AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42344") public class DataFrameUsageIT extends DataFrameRestTestCase { private boolean indicesCreated = false; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java index ac40334dfb443..1e0fcd31fb2d5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; +import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import java.io.IOException; @@ -30,14 +31,16 @@ public class TransportDeleteDataFrameTransformAction extends TransportMasterNodeAction { private final DataFrameTransformsConfigManager transformsConfigManager; + private final DataFrameAuditor auditor; @Inject public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - DataFrameTransformsConfigManager transformsConfigManager) { + DataFrameTransformsConfigManager transformsConfigManager, DataFrameAuditor auditor) { super(DeleteDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); this.transformsConfigManager = transformsConfigManager; + this.auditor = auditor; } @Override @@ -65,7 +68,10 @@ protected void masterOperation(Request request, ClusterState state, ActionListen } else { // Task is not running, delete the configuration document transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap( - r -> listener.onResponse(new AcknowledgedResponse(r)), + r -> { + auditor.info(request.getId(), "Deleted data frame transform."); + listener.onResponse(new AcknowledgedResponse(r)); + }, listener::onFailure)); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index df2d09a875d19..d814714ab6653 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -26,7 +26,10 @@ import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Request; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Response; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; import 
org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; @@ -136,7 +139,21 @@ private void collectStatsForTransformsWithoutTasks(Request request, ActionListener> searchStatsListener = ActionListener.wrap( stats -> { List allStateAndStats = response.getTransformsStateAndStats(); - allStateAndStats.addAll(stats); + // If the persistent task does NOT exist, it is STOPPED + // There is a potential race condition where the saved document does not actually have a STOPPED state + // as the task is cancelled before we persist state. + stats.forEach(stat -> + allStateAndStats.add(new DataFrameTransformStateAndStats( + stat.getId(), + new DataFrameTransformState(DataFrameTransformTaskState.STOPPED, + IndexerState.STOPPED, + stat.getTransformState().getPosition(), + stat.getTransformState().getCheckpoint(), + stat.getTransformState().getReason(), + stat.getTransformState().getProgress()), + stat.getTransformStats(), + stat.getCheckpointingInfo())) + ); transformsWithoutTasks.removeAll( stats.stream().map(DataFrameTransformStateAndStats::getId).collect(Collectors.toSet())); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index 997739b2407a7..b4d5957c0f54c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -46,6 +46,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.support.Exceptions; +import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; @@ -65,12 +66,14 @@ public class TransportPutDataFrameTransformAction private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; private final SecurityContext securityContext; + private final DataFrameAuditor auditor; @Inject public TransportPutDataFrameTransformAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, XPackLicenseState licenseState, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager, Client client) { + DataFrameTransformsConfigManager dataFrameTransformsConfigManager, Client client, + DataFrameAuditor auditor) { super(PutDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, PutDataFrameTransformAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; @@ -78,6 +81,7 @@ public TransportPutDataFrameTransformAction(Settings settings, TransportService this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? 
new SecurityContext(settings, threadPool.getThreadContext()) : null; + this.auditor = auditor; } @Override @@ -234,7 +238,10 @@ private void putDataFrame(DataFrameTransformConfig config, ActionListener Return the listener, or clean up destination index on failure. ActionListener putTransformConfigurationListener = ActionListener.wrap( - putTransformConfigurationResult -> listener.onResponse(new AcknowledgedResponse(true)), + putTransformConfigurationResult -> { + auditor.info(config.getId(), "Created data frame transform."); + listener.onResponse(new AcknowledgedResponse(true)); + }, listener::onFailure ); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index 8b7bcb8d764e3..e23e54d67b524 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -184,11 +184,10 @@ protected void masterOperation(StartDataFrameTransformAction.Request request, if(dest.length == 0) { auditor.info(request.getId(), - "Could not find destination index [" + destinationIndex + "]." + - " Creating index with deduced mappings."); + "Creating destination index [" + destinationIndex + "] with deduced mappings."); createDestinationIndex(config, createOrGetIndexListener); } else { - auditor.info(request.getId(), "Destination index [" + destinationIndex + "] already exists."); + auditor.info(request.getId(), "Using existing destination index [" + destinationIndex + "]."); ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), ClientHelper.DATA_FRAME_ORIGIN, client.admin() diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index fe70e6f84b221..5b194fa6a0b23 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -70,7 +70,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S private final Map initialPosition; private final IndexerState initialIndexerState; - private final SetOnce<DataFrameIndexer> indexer = new SetOnce<>(); + private final SetOnce<ClientDataFrameIndexer> indexer = new SetOnce<>(); private final AtomicReference taskState; private final AtomicReference stateReason; @@ -129,7 +129,7 @@ public Status getStatus() { return getState(); } - private DataFrameIndexer getIndexer() { + private ClientDataFrameIndexer getIndexer() { return indexer.get(); } @@ -217,7 +217,8 @@ public synchronized void start(Long startingCheckpoint, ActionListener logger.info("Updating state for data frame transform [{}] to [{}]", transform.getId(), state.toString()); persistStateToClusterState(state, ActionListener.wrap( task -> { - auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); + auditor.info(transform.getId(), + "Updated data frame transform state to [" + state.getTaskState() + "]."); long now = System.currentTimeMillis(); // kick off the indexer triggered(new Event(schedulerJobName(), now, now)); @@ 
-240,7 +241,14 @@ public synchronized void stop() { return; } - getIndexer().stop(); + if (getIndexer().getState() == IndexerState.STOPPED) { + return; + } + + IndexerState state = getIndexer().stop(); + if (state == IndexerState.STOPPED) { + getIndexer().doSaveState(state, getIndexer().getPosition(), () -> getIndexer().onStop()); + } } @Override @@ -305,10 +313,9 @@ void persistStateToClusterState(DataFrameTransformState state, synchronized void markAsFailed(String reason, ActionListener listener) { taskState.set(DataFrameTransformTaskState.FAILED); stateReason.set(reason); + auditor.error(transform.getId(), reason); persistStateToClusterState(getState(), ActionListener.wrap( - r -> { - listener.onResponse(null); - }, + r -> listener.onResponse(null), listener::onFailure )); } @@ -554,11 +561,17 @@ protected void doSaveState(IndexerState indexerState, Map positi next.run(); return; } + // If we are `STOPPED` on a `doSaveState` call, that indicates we transitioned to `STOPPED` from `STOPPING` + // OR we called `doSaveState` manually as the indexer was not actively running. + // Since we save the state to an index, we should make sure that our task state is in parity with the indexer state + if (indexerState.equals(IndexerState.STOPPED)) { + transformTask.setTaskStateStopped(); + } final DataFrameTransformState state = new DataFrameTransformState( transformTask.taskState.get(), indexerState, - getPosition(), + position, transformTask.currentCheckpoint.get(), transformTask.stateReason.get(), getProgress()); @@ -566,28 +579,20 @@ protected void doSaveState(IndexerState indexerState, Map positi // Persisting stats when we call `doSaveState` should be ok as we only call it on a state transition and // only every-so-often when doing the bulk indexing calls. 
See AsyncTwoPhaseIndexer#onBulkResponse for current periodicity - ActionListener> updateClusterStateListener = ActionListener.wrap( - task -> { - transformsConfigManager.putOrUpdateTransformStats( - new DataFrameTransformStateAndStats(transformId, state, getStats(), - DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null - ActionListener.wrap( - r -> { - next.run(); - }, - statsExc -> { - logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); - next.run(); - } - )); - }, - exc -> { - logger.error("Updating persistent state of transform [" + transformConfig.getId() + "] failed", exc); - next.run(); - } - ); - - transformTask.persistStateToClusterState(state, updateClusterStateListener); + transformsConfigManager.putOrUpdateTransformStats( + new DataFrameTransformStateAndStats(transformId, state, getStats(), + DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null + ActionListener.wrap( + r -> { + next.run(); + }, + statsExc -> { + logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); + auditor.warning(getJobId(), + "Failure updating stats of transform: " + statsExc.getMessage()); + next.run(); + } + )); } @Override @@ -613,7 +618,7 @@ protected void onFinish(ActionListener listener) { try { super.onFinish(listener); long checkpoint = transformTask.currentCheckpoint.incrementAndGet(); - auditor.info(transformTask.getTransformId(), "Finished indexing for data frame transform checkpoint [" + checkpoint + "]"); + auditor.info(transformTask.getTransformId(), "Finished indexing for data frame transform checkpoint [" + checkpoint + "]."); logger.info( "Finished indexing for data frame transform [" + transformTask.getTransformId() + "] checkpoint [" + checkpoint + "]"); listener.onResponse(null); @@ -624,27 +629,14 @@ protected void onFinish(ActionListener listener) { @Override protected void onStop() { - auditor.info(transformConfig.getId(), "Indexer has stopped"); + auditor.info(transformConfig.getId(), "Data frame transform has stopped."); logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); - - transformTask.setTaskStateStopped(); - transformsConfigManager.putOrUpdateTransformStats( - new DataFrameTransformStateAndStats(transformId, transformTask.getState(), getStats(), - DataFrameTransformCheckpointingInfo.EMPTY), // TODO should this be null - ActionListener.wrap( - r -> { - transformTask.shutdown(); - }, - statsExc -> { - transformTask.shutdown(); - logger.error("Updating saving stats of transform [" + transformConfig.getId() + "] failed", statsExc); - } - )); + transformTask.shutdown(); } @Override protected void onAbort() { - auditor.info(transformConfig.getId(), "Received abort request, stopping indexer"); + auditor.info(transformConfig.getId(), "Received abort request, stopping data frame transform."); logger.info("Data frame transform [" + transformConfig.getId() + "] received abort request, stopping indexer"); transformTask.shutdown(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index f8857591b2322..6201dd936ba19 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -61,7 +61,7 @@ public static Stream> extractCompositeAggregationResults(Com groups.getGroups().keySet().forEach(destinationFieldName -> { Object value = bucket.getKey().get(destinationFieldName); idGen.add(destinationFieldName, value); - document.put(destinationFieldName, value); + updateDocument(document, destinationFieldName, value); }); List aggNames = aggregationBuilders.stream().map(AggregationBuilder::getName).collect(Collectors.toList()); diff --git a/x-pack/plugin/deprecation/build.gradle b/x-pack/plugin/deprecation/build.gradle index 62d2a891929a4..bbf235131d772 100644 --- a/x-pack/plugin/deprecation/build.gradle +++ b/x-pack/plugin/deprecation/build.gradle @@ -10,7 +10,7 @@ esplugin { archivesBaseName = 'x-pack-deprecation' dependencies { - compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" + compileOnly project(":x-pack:plugin:core") } integTest.enabled = false diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index 286f8ac0a8917..a164189dd6a06 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -10,7 +10,6 @@ esplugin { archivesBaseName = 'x-pack-graph' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/plugin/graph/qa/with-security/build.gradle b/x-pack/plugin/graph/qa/with-security/build.gradle index f0f819b46d478..a79d72d0c7b55 100644 --- a/x-pack/plugin/graph/qa/with-security/build.gradle +++ b/x-pack/plugin/graph/qa/with-security/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(":x-pack:plugin:core") } // bring in graph rest test suite diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 130d6deed567f..80d4d638a0a09 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; @@ -19,11 +20,10 @@ import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost; import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.protocol.xpack.graph.VertexRequest; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.HashMap; @@ -38,7 +38,7 @@ /** * @see 
GraphExploreRequest */ -public class RestGraphAction extends XPackRestHandler { +public class RestGraphAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGraphAction.class)); public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + @@ -68,19 +68,19 @@ public RestGraphAction(Settings settings, RestController controller) { // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/{index}/_graph/explore", this, - GET, "/{index}" + URI_BASE + "/graph/_explore", deprecationLogger); + GET, "/{index}/_xpack/graph/_explore", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/{index}/_graph/explore", this, - POST, "/{index}" + URI_BASE + "/graph/_explore", deprecationLogger); + POST, "/{index}/_xpack/graph/_explore", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( GET, "/{index}/{type}/_graph/explore", this, - GET, "/{index}/{type}" + URI_BASE + "/graph/_explore", deprecationLogger); + GET, "/{index}/{type}/_xpack/graph/_explore", deprecationLogger); // TODO: remove deprecated endpoint in 8.0.0 controller.registerWithDeprecatedHandler( POST, "/{index}/{type}/_graph/explore", this, - POST, "/{index}/{type}" + URI_BASE + "/graph/_explore", deprecationLogger); + POST, "/{index}/{type}/_xpack/graph/_explore", deprecationLogger); } @Override @@ -89,7 +89,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { GraphExploreRequest graphRequest = new GraphExploreRequest(Strings.splitStringByCommaToArray(request.param("index"))); graphRequest.indicesOptions(IndicesOptions.fromRequest(request, graphRequest.indicesOptions())); graphRequest.routing(request.param("routing")); @@ -117,7 +117,7 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa deprecationLogger.deprecatedAndMaybeLog("graph_with_types", TYPES_DEPRECATION_MESSAGE); graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); } - return channel -> client.es().execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); + return channel -> client.execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); } private void parseHop(XContentParser parser, Hop currentHop, GraphExploreRequest graphRequest) throws IOException { diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle index 2b80ddc04207b..4e0c44ef8e4ab 100644 --- a/x-pack/plugin/ilm/build.gradle +++ b/x-pack/plugin/ilm/build.gradle @@ -13,7 +13,6 @@ esplugin { archivesBaseName = 'x-pack-ilm' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/plugin/logstash/build.gradle b/x-pack/plugin/logstash/build.gradle index b799dbd4ceb95..35699ba9b96a5 100644 --- a/x-pack/plugin/logstash/build.gradle +++ b/x-pack/plugin/logstash/build.gradle @@ -10,7 +10,6 @@ esplugin { archivesBaseName = 'x-pack-logstash' dependencies { - // 
"org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index c09ced75bdf06..bd398d6da6a2d 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -49,9 +49,8 @@ compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try, compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes,-serial,-try,-unchecked" dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(':modules:lang-painless:spi') compileOnly project(path: xpackModule('core'), configuration: 'default') - compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}" testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') @@ -60,7 +59,7 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') // ml deps - compile project(':libs:grok') + compile project(':libs:elasticsearch-grok') compile "com.ibm.icu:icu4j:${versions.icu4j}" compile "net.sf.supercsv:super-csv:${versions.supercsv}" nativeBundle "org.elasticsearch.ml:ml-cpp:${project.version}@zip" diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle index b47016c134459..fc27aa97d7a84 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(":x-pack:plugin:core") testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/qa/disabled/build.gradle b/x-pack/plugin/ml/qa/disabled/build.gradle index 2aa5d47acef0d..ee49189ae1a87 100644 --- a/x-pack/plugin/ml/qa/disabled/build.gradle +++ b/x-pack/plugin/ml/qa/disabled/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(":x-pack:plugin:core") testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index e85603b6aa89b..910271f10f175 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -2,7 +2,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle index e6fd8412c948b..a835d74e9236a 100644 --- 
a/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/build.gradle @@ -2,7 +2,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle b/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle index 9eac3fdd37a80..1908fb8e0927d 100644 --- a/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle +++ b/x-pack/plugin/ml/qa/no-bootstrap-tests/build.gradle @@ -1,6 +1,6 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(":x-pack:plugin:core") testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/qa/single-node-tests/build.gradle b/x-pack/plugin/ml/qa/single-node-tests/build.gradle index f856c3d4c5ff4..a51e0a3141c0e 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/ml/qa/single-node-tests/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(":x-pack:plugin:core") testCompile project(path: xpackModule('ml'), configuration: 'runtime') } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java index 41b9cab23e1ed..3d4d78e7a954f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFindFileStructureAction.java @@ -49,7 +49,7 @@ private FindFileStructureAction.Response buildFileStructureResponse(FindFileStru FileStructureFinderManager structureFinderManager = new FileStructureFinderManager(threadPool.scheduler()); FileStructureFinder fileStructureFinder = structureFinderManager.findFileStructure(request.getLinesToSample(), - request.getSample().streamInput(), new FileStructureOverrides(request), request.getTimeout()); + request.getLineMergeSizeLimit(), request.getSample().streamInput(), new FileStructureOverrides(request), request.getTimeout()); return new FindFileStructureAction.Response(fileStructureFinder.getStructure()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java index 982a6ff703572..cc15491f2e6ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderFactory.java @@ -62,7 +62,8 @@ public boolean canCreateFromSample(List explanation, String sample) { @Override public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, - FileStructureOverrides 
overrides, TimeoutChecker timeoutChecker) throws IOException { + int lineMergeSizeLimit, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) + throws IOException { return DelimitedFileStructureFinder.makeDelimitedFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, csvPreference, trimFields, overrides, timeoutChecker); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java index 8790b8f526864..45edf96ce564f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderFactory.java @@ -37,6 +37,7 @@ public interface FileStructureFinderFactory { * @param sample A sample from the file to be ingested. * @param charsetName The name of the character set in which the sample was provided. * @param hasByteOrderMarker Did the sample have a byte order marker? null means "not relevant". + * @param lineMergeSizeLimit Maximum number of characters permitted when lines are merged to create messages. * @param overrides Stores structure decisions that have been made by the end user, and should * take precedence over anything the {@link FileStructureFinder} may decide. * @param timeoutChecker Will abort the operation if its timeout is exceeded. @@ -44,5 +45,6 @@ public interface FileStructureFinderFactory { * @throws Exception if something goes wrong during creation. */ FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) throws Exception; + int lineMergeSizeLimit, FileStructureOverrides overrides, + TimeoutChecker timeoutChecker) throws Exception; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java index 1a0e41c5e135e..32a2d7244d8ae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManager.java @@ -42,6 +42,7 @@ public final class FileStructureFinderManager { public static final int MIN_SAMPLE_LINE_COUNT = 2; public static final int DEFAULT_IDEAL_SAMPLE_LINE_COUNT = 1000; + public static final int DEFAULT_LINE_MERGE_SIZE_LIMIT = 10000; static final Set FILEBEAT_SUPPORTED_ENCODINGS = Set.of( "866", @@ -294,8 +295,9 @@ public FileStructureFinderManager(ScheduledExecutorService scheduler) { this.scheduler = Objects.requireNonNull(scheduler); } - public FileStructureFinder findFileStructure(Integer idealSampleLineCount, InputStream fromFile) throws Exception { - return findFileStructure(idealSampleLineCount, fromFile, FileStructureOverrides.EMPTY_OVERRIDES, null); + public FileStructureFinder findFileStructure(Integer idealSampleLineCount, Integer lineMergeSizeLimit, + InputStream fromFile) throws Exception { + return findFileStructure(idealSampleLineCount, lineMergeSizeLimit, fromFile, FileStructureOverrides.EMPTY_OVERRIDES, null); } /** @@ -304,6 +306,8 @@ public FileStructureFinder findFileStructure(Integer idealSampleLineCount, Input * If the stream 
has fewer lines then an attempt will still be made, providing at * least {@link #MIN_SAMPLE_LINE_COUNT} lines can be read. If null * the value of {@link #DEFAULT_IDEAL_SAMPLE_LINE_COUNT} will be used. + * @param lineMergeSizeLimit Maximum number of characters permitted when lines are merged to create messages. + * If null the value of {@link #DEFAULT_LINE_MERGE_SIZE_LIMIT} will be used. * @param fromFile A stream from which the sample will be read. * @param overrides Aspects of the file structure that are known in advance. These take precedence over * values determined by structure analysis. An exception will be thrown if the file structure @@ -314,20 +318,21 @@ public FileStructureFinder findFileStructure(Integer idealSampleLineCount, Input * @return A {@link FileStructureFinder} object from which the structure and messages can be queried. * @throws Exception A variety of problems could occur at various stages of the structure finding process. */ - public FileStructureFinder findFileStructure(Integer idealSampleLineCount, InputStream fromFile, FileStructureOverrides overrides, - TimeValue timeout) - throws Exception { + public FileStructureFinder findFileStructure(Integer idealSampleLineCount, Integer lineMergeSizeLimit, InputStream fromFile, + FileStructureOverrides overrides, TimeValue timeout) throws Exception { return findFileStructure(new ArrayList<>(), (idealSampleLineCount == null) ? DEFAULT_IDEAL_SAMPLE_LINE_COUNT : idealSampleLineCount, - fromFile, overrides, timeout); + (lineMergeSizeLimit == null) ? DEFAULT_LINE_MERGE_SIZE_LIMIT : lineMergeSizeLimit, fromFile, overrides, timeout); } - public FileStructureFinder findFileStructure(List explanation, int idealSampleLineCount, InputStream fromFile) - throws Exception { - return findFileStructure(explanation, idealSampleLineCount, fromFile, FileStructureOverrides.EMPTY_OVERRIDES, null); + public FileStructureFinder findFileStructure(List explanation, int idealSampleLineCount, int lineMergeSizeLimit, + InputStream fromFile) throws Exception { + return findFileStructure(explanation, idealSampleLineCount, lineMergeSizeLimit, fromFile, FileStructureOverrides.EMPTY_OVERRIDES, + null); } - public FileStructureFinder findFileStructure(List explanation, int idealSampleLineCount, InputStream fromFile, - FileStructureOverrides overrides, TimeValue timeout) throws Exception { + public FileStructureFinder findFileStructure(List explanation, int idealSampleLineCount, int lineMergeSizeLimit, + InputStream fromFile, FileStructureOverrides overrides, + TimeValue timeout) throws Exception { try (TimeoutChecker timeoutChecker = new TimeoutChecker("structure analysis", timeout, scheduler)) { @@ -346,7 +351,8 @@ public FileStructureFinder findFileStructure(List explanation, int ideal Tuple sampleInfo = sampleFile(sampleReader, charsetName, MIN_SAMPLE_LINE_COUNT, Math.max(MIN_SAMPLE_LINE_COUNT, idealSampleLineCount), timeoutChecker); - return makeBestStructureFinder(explanation, sampleInfo.v1(), charsetName, sampleInfo.v2(), overrides, timeoutChecker); + return makeBestStructureFinder(explanation, sampleInfo.v1(), charsetName, sampleInfo.v2(), lineMergeSizeLimit, overrides, + timeoutChecker); } catch (Exception e) { // Add a dummy exception containing the explanation so far - this can be invaluable for troubleshooting as incorrect // decisions made early on in the structure analysis can result in seemingly crazy decisions or timeouts later on @@ -373,7 +379,8 @@ CharsetMatch findCharset(List explanation, InputStream inputStream, Time // Determine some 
extra characteristics of the input to compensate for some deficiencies of ICU4J boolean pureAscii = true; - boolean containsZeroBytes = false; + int evenPosZeroCount = 0; + int oddPosZeroCount = 0; inputStream.mark(BUFFER_SIZE); byte[] workspace = new byte[BUFFER_SIZE]; int remainingLength = BUFFER_SIZE; @@ -382,17 +389,22 @@ CharsetMatch findCharset(List explanation, InputStream inputStream, Time if (bytesRead <= 0) { break; } - for (int i = 0; i < bytesRead && containsZeroBytes == false; ++i) { + for (int i = 0; i < bytesRead; ++i) { if (workspace[i] == 0) { - containsZeroBytes = true; pureAscii = false; + if (i % 2 == 0) { + ++evenPosZeroCount; + } else { + ++oddPosZeroCount; + } } else { pureAscii = pureAscii && workspace[i] > 0 && workspace[i] < 128; } } remainingLength -= bytesRead; - } while (containsZeroBytes == false && remainingLength > 0); + } while (remainingLength > 0); inputStream.reset(); + boolean containsZeroBytes = evenPosZeroCount > 0 || oddPosZeroCount > 0; timeoutChecker.check("character set detection"); if (pureAscii) { @@ -433,6 +445,11 @@ CharsetMatch findCharset(List explanation, InputStream inputStream, Time if (containsZeroBytes && spaceEncodingContainsZeroByte == false) { explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + "%] confidence but was rejected as the input contains zero bytes and the [" + name + "] encoding does not"); + } else if (containsZeroBytes && 3 * oddPosZeroCount > 2 * evenPosZeroCount && 3 * evenPosZeroCount > 2 * oddPosZeroCount) { + explanation.add("Character encoding [" + name + "] matched the input with [" + charsetMatch.getConfidence() + + "%] confidence but was rejected as the distribution of zero bytes between odd and even positions in the " + + "file is very close - [" + evenPosZeroCount + "] and [" + oddPosZeroCount + "] in the first [" + + (BUFFER_SIZE / 1024) + "kB] of input"); } else { explanation.add("Using character encoding [" + name + "], which matched the input with [" + charsetMatch.getConfidence() + "%] confidence"); @@ -450,7 +467,8 @@ CharsetMatch findCharset(List explanation, InputStream inputStream, Time } FileStructureFinder makeBestStructureFinder(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) throws Exception { + int lineMergeSizeLimit, FileStructureOverrides overrides, + TimeoutChecker timeoutChecker) throws Exception { Character delimiter = overrides.getDelimiter(); Character quote = overrides.getQuote(); @@ -482,7 +500,8 @@ FileStructureFinder makeBestStructureFinder(List explanation, String sam for (FileStructureFinderFactory factory : factories) { timeoutChecker.check("high level format detection"); if (factory.canCreateFromSample(explanation, sample)) { - return factory.createFromSample(explanation, sample, charsetName, hasByteOrderMarker, overrides, timeoutChecker); + return factory.createFromSample(explanation, sample, charsetName, hasByteOrderMarker, lineMergeSizeLimit, overrides, + timeoutChecker); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java index 43612890bc8a8..6970af01bb79d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java +++ 
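Aside: the even/odd zero-byte bookkeeping introduced in findCharset above exists to reject binary input that superficially resembles UTF-16. The standalone sketch below is not the Elasticsearch implementation, only an approximation of the new rejection rule: genuine UTF-16BE text puts its zero bytes almost exclusively at even offsets (and UTF-16LE at odd offsets), so when the two counts land within a factor of 1.5 of each other neither byte order is a clear winner and the candidate encoding is discarded.

    import java.nio.charset.StandardCharsets;

    // Standalone approximation of the zero-byte distribution check added to findCharset above.
    public final class Utf16ZeroByteHeuristicSketch {

        // Mirrors the condition in the diff: reject when 3 * odd > 2 * even and 3 * even > 2 * odd,
        // i.e. when neither even nor odd offsets clearly dominate the zero bytes.
        static boolean zeroBytesTooEvenlySpread(byte[] buffer) {
            int evenPosZeroCount = 0;
            int oddPosZeroCount = 0;
            for (int i = 0; i < buffer.length; ++i) {
                if (buffer[i] == 0) {
                    if (i % 2 == 0) {
                        ++evenPosZeroCount;
                    } else {
                        ++oddPosZeroCount;
                    }
                }
            }
            return 3 * oddPosZeroCount > 2 * evenPosZeroCount && 3 * evenPosZeroCount > 2 * oddPosZeroCount;
        }

        public static void main(String[] args) {
            byte[] utf16be = "hello world".getBytes(StandardCharsets.UTF_16BE); // zeros only at even offsets
            byte[] utf16le = "hello world".getBytes(StandardCharsets.UTF_16LE); // zeros only at odd offsets
            byte[] mixed = new byte[utf16be.length + utf16le.length];
            System.arraycopy(utf16be, 0, mixed, 0, utf16be.length);
            System.arraycopy(utf16le, 0, mixed, utf16be.length, utf16le.length);

            System.out.println(zeroBytesTooEvenlySpread(utf16be)); // false - UTF-16BE is a clear winner
            System.out.println(zeroBytesTooEvenlySpread(mixed));   // true  - roughly 50/50, so reject the match
        }
    }
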
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderFactory.java @@ -68,7 +68,8 @@ DeprecationHandler.THROW_UNSUPPORTED_OPERATION, new ContextPrintingStringReader( @Override public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) throws IOException { + int lineMergeSizeLimit, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) + throws IOException { return NdJsonFileStructureFinder.makeNdJsonFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, overrides, timeoutChecker); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index d07eea15f973f..86b1d79b8b66b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.xpack.core.ml.action.FindFileStructureAction; import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; @@ -24,8 +25,8 @@ public class TextLogFileStructureFinder implements FileStructureFinder { private final FileStructure structure; static TextLogFileStructureFinder makeTextLogFileStructureFinder(List explanation, String sample, String charsetName, - Boolean hasByteOrderMarker, FileStructureOverrides overrides, - TimeoutChecker timeoutChecker) { + Boolean hasByteOrderMarker, int lineMergeSizeLimit, + FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { String[] sampleLines = sample.split("\n"); TimestampFormatFinder timestampFormatFinder = populateTimestampFormatFinder(explanation, sampleLines, overrides, timeoutChecker); switch (timestampFormatFinder.getNumMatchedFormats()) { @@ -69,6 +70,16 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex // for the CSV header or lines before the first XML document starts) ++linesConsumed; } else { + // This check avoids subsequent problems when a massive message is unwieldy and slow to process + long lengthAfterAppend = message.length() + 1L + sampleLine.length(); + if (lengthAfterAppend > lineMergeSizeLimit) { + assert linesInMessage > 0; + throw new IllegalArgumentException("Merging lines into messages resulted in an unacceptably long message. " + + "Merged message would have [" + (linesInMessage + 1) + "] lines and [" + lengthAfterAppend + "] " + + "characters (limit [" + lineMergeSizeLimit + "]). If you have messages this big please increase " + + "the value of [" + FindFileStructureAction.Request.LINE_MERGE_SIZE_LIMIT + "]. 
Otherwise it " + + "probably means the timestamp has been incorrectly detected, so try overriding that."); + } message.append('\n').append(sampleLine); ++linesInMessage; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java index 5931fea5f1abf..2980d5d0678ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderFactory.java @@ -41,8 +41,8 @@ public boolean canCreateFromSample(List explanation, String sample) { @Override public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { + int lineMergeSizeLimit, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) { return TextLogFileStructureFinder.makeTextLogFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, - overrides, timeoutChecker); + lineMergeSizeLimit, overrides, timeoutChecker); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java index 97984d1d77560..382f2e7502719 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderFactory.java @@ -125,7 +125,7 @@ public boolean canCreateFromSample(List explanation, String sample) { @Override public FileStructureFinder createFromSample(List explanation, String sample, String charsetName, Boolean hasByteOrderMarker, - FileStructureOverrides overrides, TimeoutChecker timeoutChecker) + int lineMergeSizeLimit, FileStructureOverrides overrides, TimeoutChecker timeoutChecker) throws IOException, ParserConfigurationException, SAXException { return XmlFileStructureFinder.makeXmlFileStructureFinder(explanation, sample, charsetName, hasByteOrderMarker, overrides, timeoutChecker); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java index 5810a2e929daa..03c3fb2a39f54 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestFindFileStructureAction.java @@ -53,6 +53,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient FindFileStructureAction.Request request = new FindFileStructureAction.Request(); request.setLinesToSample(restRequest.paramAsInt(FindFileStructureAction.Request.LINES_TO_SAMPLE.getPreferredName(), FileStructureFinderManager.DEFAULT_IDEAL_SAMPLE_LINE_COUNT)); + request.setLineMergeSizeLimit(restRequest.paramAsInt(FindFileStructureAction.Request.LINE_MERGE_SIZE_LIMIT.getPreferredName(), + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT)); request.setTimeout(TimeValue.parseTimeValue(restRequest.param(FindFileStructureAction.Request.TIMEOUT.getPreferredName()), DEFAULT_TIMEOUT, 
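For orientation, this is roughly how a caller exercises the widened findFileStructure overload once the new line_merge_size_limit parameter is threaded through as above. The sketch is illustrative only (the scheduler setup and the sample text are invented); the parameter order and the fall-back defaults (DEFAULT_IDEAL_SAMPLE_LINE_COUNT of 1000 lines and DEFAULT_LINE_MERGE_SIZE_LIMIT of 10000 characters when null is passed) follow the signatures added to FileStructureFinderManager in this change.

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;

    import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure;
    import org.elasticsearch.xpack.ml.filestructurefinder.FileStructureFinder;
    import org.elasticsearch.xpack.ml.filestructurefinder.FileStructureFinderManager;

    public class FindFileStructureUsageSketch {
        public static void main(String[] args) throws Exception {
            ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
            try {
                FileStructureFinderManager manager = new FileStructureFinderManager(scheduler);
                // Invented three-message sample; real callers pass the uploaded file's stream.
                InputStream sample = new ByteArrayInputStream(
                    ("2019-05-16 16:56:14 message one\n"
                        + "2019-05-16 16:56:15 message two\n"
                        + "2019-05-16 16:56:16 message three\n").getBytes(StandardCharsets.UTF_8));
                // idealSampleLineCount, then lineMergeSizeLimit, then the stream; passing null for
                // either of the first two falls back to the DEFAULT_* constants on the manager.
                FileStructureFinder finder = manager.findFileStructure(1000, 10000, sample);
                FileStructure structure = finder.getStructure();
                System.out.println(structure);
            } finally {
                scheduler.shutdownNow();
            }
        }
    }
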
FindFileStructureAction.Request.TIMEOUT.getPreferredName())); request.setCharset(restRequest.param(FindFileStructureAction.Request.CHARSET.getPreferredName())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java index 280a50324e447..7b157555eef91 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinderTests.java @@ -30,7 +30,7 @@ public void testCreateConfigsGivenCompleteCsv() throws Exception { String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -64,8 +64,8 @@ public void testCreateConfigsGivenCompleteCsvAndColumnNamesOverride() throws Exc String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -101,8 +101,8 @@ public void testCreateConfigsGivenCompleteCsvAndHasHeaderRowOverride() throws Ex String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -135,7 +135,7 @@ public void testCreateConfigsGivenCsvWithIncompleteLastRecord() throws Exception String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -170,7 +170,7 @@ public void testCreateConfigsGivenCsvWithTrailingNulls() throws Exception { String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, 
FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -214,8 +214,8 @@ public void testCreateConfigsGivenCsvWithTrailingNullsAndOverriddenTimeField() t String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -255,7 +255,7 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeader() throws Exce String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -301,8 +301,8 @@ public void testCreateConfigsGivenCsvWithTrailingNullsExceptHeaderAndColumnNames String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -340,7 +340,7 @@ public void testCreateConfigsGivenCsvWithTimeLastColumn() throws Exception { String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = csvFactory.createFromSample(explanation, sample, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java index 978f1c5286de8..188bc9a628bd8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureFinderManagerTests.java @@ -14,6 +14,7 @@ import org.junit.Before; import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.PipedInputStream; @@ -25,6 +26,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.ml.filestructurefinder.FileStructureOverrides.EMPTY_OVERRIDES; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static 
org.hamcrest.Matchers.startsWith; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -54,7 +56,7 @@ public void testFindCharsetGivenCharacterWidths() throws Exception { } } - public void testFindCharsetGivenBinary() throws Exception { + public void testFindCharsetGivenRandomBinary() throws Exception { // This input should never match a single byte character set. ICU4J will sometimes decide // that it matches a double byte character set, hence the two assertion branches. @@ -73,9 +75,35 @@ public void testFindCharsetGivenBinary() throws Exception { } } + public void testFindCharsetGivenBinaryNearUtf16() throws Exception { + + // This input should never match a single byte character set. ICU4J will probably decide + // that it matches both UTF-16BE and UTF-16LE, but we should reject these as there's no + // clear winner. + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + if (randomBoolean()) { + stream.write(randomAlphaOfLengthBetween(3, 4).getBytes(StandardCharsets.UTF_16LE)); + } + for (int i = 0; i < 50; ++i) { + stream.write(randomAlphaOfLengthBetween(5, 6).getBytes(StandardCharsets.UTF_16BE)); + stream.write(randomAlphaOfLengthBetween(5, 6).getBytes(StandardCharsets.UTF_16LE)); + } + if (randomBoolean()) { + stream.write(randomAlphaOfLengthBetween(3, 4).getBytes(StandardCharsets.UTF_16BE)); + } + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> structureFinderManager.findCharset(explanation, new ByteArrayInputStream(stream.toByteArray()), NOOP_TIMEOUT_CHECKER)); + + assertEquals("Could not determine a usable character encoding for the input - could it be binary data?", e.getMessage()); + assertThat(explanation.toString(), + containsString("but was rejected as the distribution of zero bytes between odd and even positions in the file is very close")); + } + public void testMakeBestStructureGivenNdJson() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, NDJSON_SAMPLE, StandardCharsets.UTF_8.name(), - randomBoolean(), EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), instanceOf(NdJsonFileStructureFinder.class)); + randomBoolean(), FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), + instanceOf(NdJsonFileStructureFinder.class)); } public void testMakeBestStructureGivenNdJsonAndDelimitedOverride() throws Exception { @@ -86,12 +114,14 @@ public void testMakeBestStructureGivenNdJsonAndDelimitedOverride() throws Except .setFormat(FileStructure.Format.DELIMITED).setQuote('\'').build(); assertThat(structureFinderManager.makeBestStructureFinder(explanation, NDJSON_SAMPLE, StandardCharsets.UTF_8.name(), - randomBoolean(), overrides, NOOP_TIMEOUT_CHECKER), instanceOf(DelimitedFileStructureFinder.class)); + randomBoolean(), FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER), + instanceOf(DelimitedFileStructureFinder.class)); } public void testMakeBestStructureGivenXml() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, XML_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), instanceOf(XmlFileStructureFinder.class)); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), + instanceOf(XmlFileStructureFinder.class)); } public void testMakeBestStructureGivenXmlAndTextOverride() throws Exception { @@ -99,12 +129,14 @@ public void testMakeBestStructureGivenXmlAndTextOverride() throws Exception { 
FileStructureOverrides overrides = FileStructureOverrides.builder().setFormat(FileStructure.Format.SEMI_STRUCTURED_TEXT).build(); assertThat(structureFinderManager.makeBestStructureFinder(explanation, XML_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - overrides, NOOP_TIMEOUT_CHECKER), instanceOf(TextLogFileStructureFinder.class)); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER), + instanceOf(TextLogFileStructureFinder.class)); } public void testMakeBestStructureGivenCsv() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, CSV_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), instanceOf(DelimitedFileStructureFinder.class)); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), + instanceOf(DelimitedFileStructureFinder.class)); } public void testMakeBestStructureGivenCsvAndJsonOverride() { @@ -113,14 +145,15 @@ public void testMakeBestStructureGivenCsvAndJsonOverride() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> structureFinderManager.makeBestStructureFinder(explanation, CSV_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - overrides, NOOP_TIMEOUT_CHECKER)); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER)); assertEquals("Input did not match the specified format [ndjson]", e.getMessage()); } public void testMakeBestStructureGivenText() throws Exception { assertThat(structureFinderManager.makeBestStructureFinder(explanation, TEXT_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), instanceOf(TextLogFileStructureFinder.class)); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER), + instanceOf(TextLogFileStructureFinder.class)); } public void testMakeBestStructureGivenTextAndDelimitedOverride() throws Exception { @@ -130,7 +163,8 @@ public void testMakeBestStructureGivenTextAndDelimitedOverride() throws Exceptio .setFormat(FileStructure.Format.DELIMITED).setDelimiter(':').build(); assertThat(structureFinderManager.makeBestStructureFinder(explanation, TEXT_SAMPLE, StandardCharsets.UTF_8.name(), randomBoolean(), - overrides, NOOP_TIMEOUT_CHECKER), instanceOf(DelimitedFileStructureFinder.class)); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER), + instanceOf(DelimitedFileStructureFinder.class)); } public void testFindFileStructureTimeout() throws IOException, InterruptedException { @@ -163,7 +197,8 @@ public void testFindFileStructureTimeout() throws IOException, InterruptedExcept junkProducer.start(); ElasticsearchTimeoutException e = expectThrows(ElasticsearchTimeoutException.class, - () -> structureFinderManager.findFileStructure(explanation, linesOfJunk - 1, bigInput, EMPTY_OVERRIDES, timeout)); + () -> structureFinderManager.findFileStructure(explanation, FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, + linesOfJunk - 1, bigInput, EMPTY_OVERRIDES, timeout)); assertThat(e.getMessage(), startsWith("Aborting structure analysis during [")); assertThat(e.getMessage(), endsWith("] as it has taken longer than the timeout of [" + timeout + "]")); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java index a220bdf3b0690..048d2708e7740 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinderTests.java @@ -19,7 +19,7 @@ public void testCreateConfigsGivenGoodJson() throws Exception { String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = factory.createFromSample(explanation, NDJSON_SAMPLE, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index 6ac672f61780e..4c921c8a9f9ba 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -20,13 +20,36 @@ public class TextLogFileStructureFinderTests extends FileStructureTestCase { private FileStructureFinderFactory factory = new TextLogFileStructureFinderFactory(); + public void testCreateConfigsGivenLowLineMergeSizeLimit() { + + String sample = "2019-05-16 16:56:14 line 1 abcdefghijklmnopqrstuvwxyz\n" + + "2019-05-16 16:56:14 line 2 abcdefghijklmnopqrstuvwxyz\n" + + "continuation line 2.1\n" + + "continuation line 2.2\n" + + "continuation line 2.3\n" + + "continuation line 2.4\n" + + "2019-05-16 16:56:14 line 3 abcdefghijklmnopqrstuvwxyz\n"; + + assertTrue(factory.canCreateFromSample(explanation, sample)); + + String charset = randomFrom(POSSIBLE_CHARSETS); + Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, 100, + FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER)); + + assertEquals("Merging lines into messages resulted in an unacceptably long message. Merged message would have [4] lines and " + + "[119] characters (limit [100]). If you have messages this big please increase the value of [line_merge_size_limit]. 
" + + "Otherwise it probably means the timestamp has been incorrectly detected, so try overriding that.", e.getMessage()); + } + public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertTrue(factory.canCreateFromSample(explanation, TEXT_SAMPLE)); String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -66,8 +89,8 @@ public void testCreateConfigsGivenElasticsearchLogAndTimestampFormatOverride() t String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -102,8 +125,8 @@ public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() th String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -139,8 +162,8 @@ public void testCreateConfigsGivenElasticsearchLogAndGrokPatternOverride() throw String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); - FileStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, overrides, - NOOP_TIMEOUT_CHECKER); + FileStructureFinder structureFinder = factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); @@ -181,7 +204,8 @@ public void testCreateConfigsGivenElasticsearchLogAndImpossibleGrokPatternOverri String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, overrides, NOOP_TIMEOUT_CHECKER)); + () -> factory.createFromSample(explanation, TEXT_SAMPLE, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, overrides, NOOP_TIMEOUT_CHECKER)); assertEquals("Supplied Grok pattern [\\[%{LOGLEVEL:loglevel} *\\]\\[%{HOSTNAME:node}\\]\\[%{TIMESTAMP_ISO8601:timestamp}\\] " + "\\[%{JAVACLASS:class} *\\] %{JAVALOGMESSAGE:message}] does not match sample messages", e.getMessage()); @@ -200,8 +224,8 @@ public void testErrorOnIncorrectMessageFormation() { 
String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, FileStructureOverrides.EMPTY_OVERRIDES, - NOOP_TIMEOUT_CHECKER)); + () -> factory.createFromSample(explanation, sample, charset, hasByteOrderMarker, + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER)); assertEquals("Failed to create more than one message from the sample lines provided. (The last is discarded in " + "case the sample is incomplete.) If your sample does contain multiple messages the problem is probably that " diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java index b6f93a6e39b1d..9ad07f6142782 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinderTests.java @@ -19,7 +19,7 @@ public void testCreateConfigsGivenGoodXml() throws Exception { String charset = randomFrom(POSSIBLE_CHARSETS); Boolean hasByteOrderMarker = randomHasByteOrderMarker(charset); FileStructureFinder structureFinder = factory.createFromSample(explanation, XML_SAMPLE, charset, hasByteOrderMarker, - FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); + FileStructureFinderManager.DEFAULT_LINE_MERGE_SIZE_LIMIT, FileStructureOverrides.EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); FileStructure structure = structureFinder.getStructure(); diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index cb6395b18a4c2..b4bcc6078d5db 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -10,7 +10,6 @@ esplugin { archivesBaseName = 'x-pack-monitoring' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { @@ -18,13 +17,13 @@ dependencies { } // monitoring deps - compile "org.elasticsearch.client:elasticsearch-rest-client:${version}" - compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}" + compile project(':client:rest') + compile project(':client:sniffer') // baz - this goes away after we separate out the actions #27759 - testCompile "org.elasticsearch.plugin:x-pack-watcher:${version}" + testCompile project(xpackModule('watcher')) - testCompile "org.elasticsearch.plugin:x-pack-ilm:${version}" + testCompile project(xpackModule('ilm')) } compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java index 6aab3114b7807..8f8072ef5b864 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java +++ 
b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ccr/StatsCollector.java @@ -14,10 +14,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.client.CcrClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -37,25 +35,25 @@ public final class StatsCollector extends Collector { private final Settings settings; private final ThreadContext threadContext; - private final CcrClient ccrClient; + private final Client client; public StatsCollector( final Settings settings, final ClusterService clusterService, final XPackLicenseState licenseState, final Client client) { - this(settings, clusterService, licenseState, new XPackClient(client).ccr(), client.threadPool().getThreadContext()); + this(settings, clusterService, licenseState, client, client.threadPool().getThreadContext()); } StatsCollector( final Settings settings, final ClusterService clusterService, final XPackLicenseState licenseState, - final CcrClient ccrClient, + final Client client, final ThreadContext threadContext) { super(TYPE, clusterService, CCR_STATS_TIMEOUT, licenseState); this.settings = settings; - this.ccrClient = ccrClient; + this.client = client; this.threadContext = threadContext; } @@ -79,7 +77,7 @@ protected Collection doCollect( final String clusterUuid = clusterUuid(clusterState); final CcrStatsAction.Request request = new CcrStatsAction.Request(); - final CcrStatsAction.Response response = ccrClient.stats(request).actionGet(getCollectionTimeout()); + final CcrStatsAction.Response response = client.execute(CcrStatsAction.INSTANCE, request).actionGet(getCollectionTimeout()); final AutoFollowStatsMonitoringDoc autoFollowStatsDoc = new AutoFollowStatsMonitoringDoc(clusterUuid, timestamp, interval, node, response.getAutoFollowStats()); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java index 855780d4836ae..be34af2850ec6 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollector.java @@ -14,10 +14,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; import org.elasticsearch.xpack.monitoring.collector.Collector; @@ -43,15 +41,15 @@ public class JobStatsCollector extends Collector { private final Settings settings; private final ThreadContext threadContext; - private final MachineLearningClient client; + private final Client client; public JobStatsCollector(final Settings settings, final ClusterService clusterService, 
final XPackLicenseState licenseState, final Client client) { - this(settings, clusterService, licenseState, new XPackClient(client).machineLearning(), client.threadPool().getThreadContext()); + this(settings, clusterService, licenseState, client, client.threadPool().getThreadContext()); } JobStatsCollector(final Settings settings, final ClusterService clusterService, - final XPackLicenseState licenseState, final MachineLearningClient client, final ThreadContext threadContext) { + final XPackLicenseState licenseState, final Client client, final ThreadContext threadContext) { super(JobStatsMonitoringDoc.TYPE, clusterService, JOB_STATS_TIMEOUT, licenseState); this.settings = settings; this.client = client; @@ -74,7 +72,7 @@ protected List doCollect(final MonitoringDoc.Node node, // fetch details about all jobs try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(MONITORING_ORIGIN)) { final GetJobsStatsAction.Response jobs = - client.getJobsStats(new GetJobsStatsAction.Request(MetaData.ALL)) + client.execute(GetJobsStatsAction.INSTANCE, new GetJobsStatsAction.Request(MetaData.ALL)) .actionGet(getCollectionTimeout()); final long timestamp = timestamp(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java index 12f2f77a9d40c..8661c77345591 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporter.java @@ -40,7 +40,6 @@ import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; @@ -431,8 +430,7 @@ private boolean hasValidVersion(final Object version, final long minimumVersion) */ private void getClusterAlertsInstallationAsyncActions(final boolean indexExists, final List asyncActions, final AtomicInteger pendingResponses) { - final XPackClient xpackClient = new XPackClient(client); - final WatcherClient watcher = xpackClient.watcher(); + final WatcherClient watcher = new WatcherClient(client); final boolean canAddWatches = licenseState.isMonitoringClusterAlertsAllowed(); for (final String watchId : ClusterAlertsUtil.WATCH_IDS) { diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java index 0a1576393ba26..04faf82d8c1d1 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkAction.java @@ -8,21 +8,22 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; 
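The same simplification runs through both collectors above: rather than wrapping the injected client in XPackClient and pulling out a typed CcrClient or MachineLearningClient, the collector keeps a plain Client and dispatches the action itself. A minimal sketch of that call shape follows (the helper method and its timeout argument are invented for illustration; the execute/actionGet form matches the JobStatsCollector change above).

    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.metadata.MetaData;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;

    class DirectClientCallSketch {
        // Fetches ML job stats the way the reworked JobStatsCollector does: one generic
        // execute() call on the injected Client instead of a MachineLearningClient wrapper.
        static GetJobsStatsAction.Response fetchJobStats(Client client, TimeValue timeout) {
            GetJobsStatsAction.Request request = new GetJobsStatsAction.Request(MetaData.ALL);
            return client.execute(GetJobsStatsAction.INSTANCE, request).actionGet(timeout);
        }
    }
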
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; -import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; import java.util.Arrays; @@ -34,7 +35,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; -public class RestMonitoringBulkAction extends XPackRestHandler { +public class RestMonitoringBulkAction extends BaseRestHandler { public static final String MONITORING_ID = "system_id"; public static final String MONITORING_VERSION = "system_api_version"; @@ -68,7 +69,7 @@ public String getName() { } @Override - public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final String id = request.param(MONITORING_ID); if (Strings.isEmpty(id)) { @@ -98,27 +99,9 @@ public RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient cli final long timestamp = System.currentTimeMillis(); final long intervalMillis = parseTimeValue(intervalAsString, INTERVAL).getMillis(); - final MonitoringBulkRequestBuilder requestBuilder = client.monitoring().prepareMonitoringBulk(); + final MonitoringBulkRequestBuilder requestBuilder = new MonitoringBulkRequestBuilder(client); requestBuilder.add(system, request.content(), request.getXContentType(), timestamp, intervalMillis); - return channel -> requestBuilder.execute(new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(MonitoringBulkResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - { - builder.field("took", response.getTookInMillis()); - builder.field("ignored", response.isIgnored()); - - final MonitoringBulkResponse.Error error = response.getError(); - builder.field("errors", error != null); - - if (error != null) { - builder.field("error", response.getError()); - } - } - builder.endObject(); - return new BytesRestResponse(response.status(), builder); - } - }); + return channel -> requestBuilder.execute(getRestBuilderListener(channel)); } @Override @@ -138,4 +121,26 @@ private boolean isSupportedSystemVersion(final MonitoredSystem system, final Str final List monitoredSystem = supportedApiVersions.getOrDefault(system, emptyList()); return monitoredSystem.contains(version); } + + static RestBuilderListener getRestBuilderListener(RestChannel channel) { + return new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(MonitoringBulkResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + { + builder.field("took", response.getTookInMillis()); + builder.field("ignored", response.isIgnored()); + + final MonitoringBulkResponse.Error error = 
response.getError(); + builder.field("errors", error != null); + + if (error != null) { + builder.field("error", response.getError()); + } + } + builder.endObject(); + return new BytesRestResponse(response.status(), builder); + } + }; + } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java index 0713e26f80667..61b485e77ae27 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ml/JobStatsCollectorTests.java @@ -6,16 +6,17 @@ package org.elasticsearch.xpack.monitoring.collector.ml; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.util.QueryPage; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Request; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Response; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction.Response.JobStats; -import org.elasticsearch.xpack.core.action.util.QueryPage; -import org.elasticsearch.xpack.core.ml.client.MachineLearningClient; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringDoc; @@ -128,7 +129,7 @@ public void testDoCollect() throws Exception { whenClusterStateWithUUID(clusterUuid); final MonitoringDoc.Node node = randomMonitoringNode(random()); - final MachineLearningClient client = mock(MachineLearningClient.class); + final Client client = mock(Client.class); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); @@ -143,7 +144,7 @@ public void testDoCollect() throws Exception { final ActionFuture future = (ActionFuture)mock(ActionFuture.class); final Response response = new Response(new QueryPage<>(jobStats, jobStats.size(), Job.RESULTS_FIELD)); - when(client.getJobsStats(eq(new Request(MetaData.ALL)))).thenReturn(future); + when(client.execute(eq(GetJobsStatsAction.INSTANCE), eq(new Request(MetaData.ALL)))).thenReturn(future); when(future.actionGet(timeout)).thenReturn(response); final long interval = randomNonNegativeLong(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index 5db71f72cf6ef..137721f84462b 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -105,7 +105,7 @@ public void testExport() throws Exception { } assertBusy(() -> { - MonitoringBulkRequestBuilder bulk = monitoringClient().prepareMonitoringBulk(); + 
MonitoringBulkRequestBuilder bulk = new MonitoringBulkRequestBuilder(client()); monitoringDocs.forEach(bulk::add); assertEquals(RestStatus.OK, bulk.get().status()); refresh(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java index 7a4427c9f0fdc..fb79751f797e6 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/rest/action/RestMonitoringBulkActionTests.java @@ -6,8 +6,7 @@ package org.elasticsearch.xpack.monitoring.rest.action; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -25,22 +24,16 @@ import org.elasticsearch.rest.action.RestBuilderListener; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; -import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkRequestBuilder; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkResponse; -import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import java.util.HashMap; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -115,8 +108,7 @@ public void testUnknownSystemVersion() { public void testNoErrors() throws Exception { final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), false); - final FakeRestRequest request = createRestRequest(randomSystemId(), TEMPLATE_VERSION, "10s"); - final RestResponse restResponse = getRestBuilderListener(request).buildResponse(response); + final RestResponse restResponse = getRestBuilderListener().buildResponse(response); assertThat(restResponse.status(), is(RestStatus.OK)); assertThat(restResponse.content().utf8ToString(), @@ -125,8 +117,7 @@ public void testNoErrors() throws Exception { public void testNoErrorsButIgnored() throws Exception { final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), true); - final FakeRestRequest request = createRestRequest(randomSystemId(), TEMPLATE_VERSION, "10s"); - final RestResponse restResponse = getRestBuilderListener(request).buildResponse(response); + final RestResponse restResponse = getRestBuilderListener().buildResponse(response); assertThat(restResponse.status(), is(RestStatus.OK)); assertThat(restResponse.content().utf8ToString(), @@ -139,8 +130,7 @@ public void testWithErrors() throws Exception { final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), error); final String errorJson; - 
final FakeRestRequest request = createRestRequest(randomSystemId(), TEMPLATE_VERSION, "10s"); - final RestResponse restResponse = getRestBuilderListener(request).buildResponse(response); + final RestResponse restResponse = getRestBuilderListener().buildResponse(response); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { error.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -167,35 +157,18 @@ private static String randomSystemId() { } private void prepareRequest(final RestRequest restRequest) throws Exception { - getRestBuilderListener(restRequest); - } - - private RestBuilderListener getRestBuilderListener(final RestRequest restRequest) throws Exception { - final Client client = mock(Client.class); - final XPackClient xpackClient = mock(XPackClient.class); - final MonitoringClient monitoringClient = mock(MonitoringClient.class); - final AtomicReference> listenerReference = new AtomicReference<>(); - final MonitoringBulkRequestBuilder builder = new MonitoringBulkRequestBuilder(client){ - @SuppressWarnings("unchecked") - @Override - public void execute(ActionListener listener) { - listenerReference.set((RestBuilderListener)listener); - } - }; - when(monitoringClient.prepareMonitoringBulk()).thenReturn(builder); - when(xpackClient.monitoring()).thenReturn(monitoringClient); - - final CheckedConsumer consumer = action.doPrepareRequest(restRequest, xpackClient); - + final NodeClient client = mock(NodeClient.class); + final CheckedConsumer consumer = action.prepareRequest(restRequest, client); final RestChannel channel = mock(RestChannel.class); when(channel.newBuilder()).thenReturn(JsonXContent.contentBuilder()); - - // trigger/capture execution + // trigger execution consumer.accept(channel); + } - assertThat(listenerReference.get(), not(nullValue())); - - return listenerReference.get(); + private RestBuilderListener getRestBuilderListener() throws Exception { + final RestChannel channel = mock(RestChannel.class); + when(channel.newBuilder()).thenReturn(JsonXContent.contentBuilder()); + return RestMonitoringBulkAction.getRestBuilderListener(channel); } private static FakeRestRequest createRestRequest(final String systemId, final String systemApiVersion, final String interval) { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java index c350b9a374ab2..6917fd5c5697e 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/test/MonitoringIntegTestCase.java @@ -19,8 +19,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.xpack.core.XPackClient; -import org.elasticsearch.xpack.core.monitoring.client.MonitoringClient; import org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils; import org.elasticsearch.xpack.core.monitoring.test.MockPainlessScriptEngine; import org.elasticsearch.xpack.monitoring.LocalStateMonitoring; @@ -75,10 +73,6 @@ protected Collection> nodePlugins() { MockIngestPlugin.class, CommonAnalysisPlugin.class); } - protected MonitoringClient monitoringClient() { - return randomBoolean() ? 
new XPackClient(client()).monitoring() : new MonitoringClient(client()); - } - @Override protected Set excludeTemplates() { return new HashSet<>(monitoringTemplateNames()); diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index ae53f78dad2ff..39afbd4349606 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -14,9 +14,8 @@ compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" - - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(":server") + compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 93d7c5065cc29..0421f72aaf5b8 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -211,8 +211,10 @@ public RollupJobConfig getConfig() { } /** - * Attempt to start the indexer. If the state is anything other than STOPPED, this will fail. - * Otherwise, the persistent task's status will be updated to reflect the change. + * Attempt to start the indexer. + * - If the indexer is already started/indexing, returns an acknowledgement + * - If the indexer is stopped, starts the task, updates the persistent task's status and returns an acknowledgement + * - Any other state results in an error * * Note that while the job is started, the indexer will not necessarily run immediately. 
That * will only occur when the scheduler triggers it based on the cron @@ -221,8 +223,14 @@ public RollupJobConfig getConfig() { */ public synchronized void start(ActionListener listener) { final IndexerState prevState = indexer.getState(); - if (prevState != IndexerState.STOPPED) { - // fails if the task is not STOPPED + + if (prevState == IndexerState.STARTED || prevState == IndexerState.INDEXING) { + // We're already running so just return acknowledgement + logger.debug("Indexer already running (State: [" + prevState + "]), acknowledging start without change."); + listener.onResponse(new StartRollupJobAction.Response(true)); + return; + } else if (prevState != IndexerState.STOPPED) { + // if we're not already started/indexing, we must be STOPPED to get started listener.onFailure(new ElasticsearchException("Cannot start task for Rollup Job [" + job.getConfig().getId() + "] because" + " state was [" + prevState + "]")); return; @@ -231,11 +239,10 @@ public synchronized void start(ActionListener lis final IndexerState newState = indexer.start(); if (newState != IndexerState.STARTED) { listener.onFailure(new ElasticsearchException("Cannot start task for Rollup Job [" + job.getConfig().getId() + "] because" - + " state was [" + newState + "]")); + + " new state was [" + newState + "]")); return; } - final RollupJobStatus state = new RollupJobStatus(IndexerState.STARTED, indexer.getPosition()); logger.debug("Updating state for rollup job [" + job.getConfig().getId() + "] to [" + state.getIndexerState() + "][" + state.getPosition() + "]"); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 85962a5cfdbcf..a32cd975b6411 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -163,17 +163,16 @@ public void testStartWhenStarted() throws InterruptedException { assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); CountDownLatch latch = new CountDownLatch(1); - task.start(new ActionListener() { + task.start(new ActionListener<>() { @Override public void onResponse(StartRollupJobAction.Response response) { - fail("Should not have entered onResponse."); + assertTrue(response.isStarted()); + latch.countDown(); } @Override public void onFailure(Exception e) { - assertThat(e.getMessage(), equalTo("Cannot start task for Rollup Job [" - + job.getConfig().getId() + "] because state was [STARTED]")); - latch.countDown(); + fail("Should not have thrown an exception: " + e.getMessage()); } }); latch.await(3, TimeUnit.SECONDS); diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index cc5833d20e440..8d6fde6845da4 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -13,7 +13,6 @@ esplugin { archivesBaseName = 'x-pack-security' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 
5a95594b292ed..0097adf30f511 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -5,13 +5,12 @@ apply plugin: 'elasticsearch.build' archivesBaseName = 'elasticsearch-security-cli' dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(":server") compileOnly project(path: xpackModule('core'), configuration: 'default') compile "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" compile "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}" testImplementation 'com.google.jimfs:jimfs:1.1' - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/security/qa/basic-enable-security/build.gradle b/x-pack/plugin/security/qa/basic-enable-security/build.gradle index a21e3c68d3fc4..27532cfb7f155 100644 --- a/x-pack/plugin/security/qa/basic-enable-security/build.gradle +++ b/x-pack/plugin/security/qa/basic-enable-security/build.gradle @@ -4,7 +4,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/security/qa/security-basic/build.gradle b/x-pack/plugin/security/qa/security-basic/build.gradle index 864a1e5180934..e005aeb9e8cff 100644 --- a/x-pack/plugin/security/qa/security-basic/build.gradle +++ b/x-pack/plugin/security/qa/security-basic/build.gradle @@ -4,7 +4,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/security/qa/tls-basic/build.gradle b/x-pack/plugin/security/qa/tls-basic/build.gradle index 6487475f7c8c3..e9c0f636e2425 100644 --- a/x-pack/plugin/security/qa/tls-basic/build.gradle +++ b/x-pack/plugin/security/qa/tls-basic/build.gradle @@ -4,7 +4,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 52d40d86d53bc..3e95f4a10d01e 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.http.HttpServerTransport; @@ -977,36 +976,12 @@ public Function> getFieldFilter() { @Override public BiConsumer getJoinValidator() { if (enabled) { - return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), - DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings), settings) - .andThen(new ValidateUpgradedSecurityIndex()) + return new ValidateUpgradedSecurityIndex() .andThen(new ValidateLicenseForFIPS(XPackSettings.FIPS_MODE_ENABLED.get(settings))); } return null; } - static final class ValidateTLSOnJoin implements BiConsumer { - private final boolean isTLSEnabled; - private final String discoveryType; - private final Settings settings; - - ValidateTLSOnJoin(boolean isTLSEnabled, String discoveryType, Settings settings) { - this.isTLSEnabled = isTLSEnabled; - this.discoveryType = discoveryType; - this.settings = settings; - } - - @Override - public void accept(DiscoveryNode node, ClusterState state) { - License license = LicenseService.getLicense(state.metaData()); - if (isTLSEnabled == false && "single-node".equals(discoveryType) == false - && XPackLicenseState.isTransportTlsRequired(license, settings)) { - throw new IllegalStateException("Transport TLS ([" + XPackSettings.TRANSPORT_SSL_ENABLED.getKey() + - "]) is required for license type [" + license.operationMode().description() + "] when security is enabled"); - } - } - } - static final class ValidateUpgradedSecurityIndex implements BiConsumer { @Override public void accept(DiscoveryNode node, ClusterState state) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java index 409317bbf89cc..f4eb97fcb3472 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/SecurityActionMapper.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.action; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.transport.TransportRequest; @@ -36,8 +35,8 @@ public String action(String action, TransportRequest request) { break; case AnalyzeAction.NAME: case AnalyzeAction.NAME + "[s]": - assert request instanceof AnalyzeRequest; - String[] indices = ((AnalyzeRequest) request).indices(); + assert request instanceof AnalyzeAction.Request; + String[] indices = ((AnalyzeAction.Request) request).indices(); if (indices == null || (indices.length == 1 && indices[0] == null)) { return CLUSTER_PERMISSION_ANALYZE; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 768bc38813c0b..4d8dca8e095a8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -65,7 +65,6 @@ import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; -import static org.elasticsearch.discovery.DiscoveryModule.ZEN2_DISCOVERY_TYPE; import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_MAIN_ALIAS; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_MAIN_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; @@ -243,61 +242,6 @@ public void testJoinValidatorOnDisabledSecurity() throws Exception { assertNull(joinValidator); } - public void testTLSJoinValidator() throws Exception { - createComponents(Settings.EMPTY); - BiConsumer joinValidator = security.getJoinValidator(); - assertNotNull(joinValidator); - DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); - joinValidator.accept(node, ClusterState.builder(ClusterName.DEFAULT).build()); - int numIters = randomIntBetween(1, 10); - for (int i = 0; i < numIters; i++) { - boolean tlsOn = randomBoolean(); - boolean securityExplicitlyEnabled = randomBoolean(); - String discoveryType = randomFrom("single-node", ZEN2_DISCOVERY_TYPE, randomAlphaOfLength(4)); - - final Settings settings; - if (securityExplicitlyEnabled) { - settings = Settings.builder().put("xpack.security.enabled", true).build(); - } else { - settings = Settings.EMPTY; - } - Security.ValidateTLSOnJoin validator = new Security.ValidateTLSOnJoin(tlsOn, discoveryType, settings); - MetaData.Builder builder = MetaData.builder(); - License.OperationMode licenseMode = randomFrom(License.OperationMode.values()); - License license = TestUtils.generateSignedLicense(licenseMode.description(), TimeValue.timeValueHours(24)); - TestUtils.putLicense(builder, license); - ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); - - final boolean expectFailure; - switch (licenseMode) { - case PLATINUM: - case GOLD: - case STANDARD: - expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false; - break; - case BASIC: - expectFailure = tlsOn == false && "single-node".equals(discoveryType) == false && securityExplicitlyEnabled; - break; - case MISSING: - case TRIAL: - expectFailure = false; - break; - default: - throw new AssertionError("unknown operation mode [" + license.operationMode() + "]"); - } - logger.info("Test TLS join; Lic:{} TLS:{} Disco:{} Settings:{} ; Expect Failure: {}", - licenseMode, tlsOn, discoveryType, settings.toDelimitedString(','), expectFailure); - if (expectFailure) { - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> validator.accept(node, state)); - assertEquals("Transport TLS ([xpack.security.transport.ssl.enabled]) is required for license type [" - + license.operationMode().description() + "] when security is enabled", ise.getMessage()); - } else { - validator.accept(node, state); - } - validator.accept(node, ClusterState.builder(ClusterName.DEFAULT).metaData(MetaData.builder().build()).build()); - } - } - public void testJoinValidatorForFIPSLicense() throws Exception { DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), 
VersionUtils.randomVersionBetween(random(), null, Version.CURRENT)); @@ -318,29 +262,6 @@ public void testJoinValidatorForFIPSLicense() throws Exception { } } - public void testIndexJoinValidator_Old_And_Rolling() throws Exception { - createComponents(Settings.EMPTY); - BiConsumer joinValidator = security.getJoinValidator(); - assertNotNull(joinValidator); - Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), - VersionUtils.getPreviousVersion(Version.V_7_0_0)); - DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); - IndexMetaData indexMetaData = IndexMetaData.builder(SECURITY_MAIN_ALIAS) - .settings(settings(version) - .put(INDEX_FORMAT_SETTING.getKey(), INTERNAL_MAIN_INDEX_FORMAT - 1)) - .numberOfShards(1).numberOfReplicas(0) - .build(); - DiscoveryNode existingOtherNode = new DiscoveryNode("bar", buildNewFakeTransportAddress(), version); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(existingOtherNode).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(discoveryNodes) - .metaData(MetaData.builder().put(indexMetaData, true).build()).build(); - IllegalStateException e = expectThrows(IllegalStateException.class, - () -> joinValidator.accept(node, clusterState)); - assertThat(e.getMessage(), equalTo("Security index is not on the current version [6] - " + - "The Upgrade API must be run for 7.x nodes to join the cluster")); - } - public void testIndexJoinValidator_FullyCurrentCluster() throws Exception { createComponents(Settings.EMPTY); BiConsumer joinValidator = security.getJoinValidator(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java index 6efb293f7b201..ef063c93961e4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/SecurityActionMapperTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.security.action; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.test.ESTestCase; @@ -73,11 +72,11 @@ public void testClearScrollAll() { public void testIndicesAnalyze() { SecurityActionMapper securityActionMapper = new SecurityActionMapper(); - AnalyzeRequest analyzeRequest; + AnalyzeAction.Request analyzeRequest; if (randomBoolean()) { - analyzeRequest = new AnalyzeRequest(randomAlphaOfLength(randomIntBetween(1, 30))).text("text"); + analyzeRequest = new AnalyzeAction.Request(randomAlphaOfLength(randomIntBetween(1, 30))).text("text"); } else { - analyzeRequest = new AnalyzeRequest(null).text("text"); + analyzeRequest = new AnalyzeAction.Request(null).text("text"); analyzeRequest.index(randomAlphaOfLength(randomIntBetween(1, 30))); } assertThat(securityActionMapper.action(AnalyzeAction.NAME, analyzeRequest), equalTo(AnalyzeAction.NAME)); @@ -85,7 +84,7 @@ public void testIndicesAnalyze() { public void testClusterAnalyze() { SecurityActionMapper securityActionMapper = new SecurityActionMapper(); - AnalyzeRequest analyzeRequest = new AnalyzeRequest(null).text("text"); + AnalyzeAction.Request 
analyzeRequest = new AnalyzeAction.Request(null).text("text"); assertThat(securityActionMapper.action(AnalyzeAction.NAME, analyzeRequest), equalTo(SecurityActionMapper.CLUSTER_PERMISSION_ANALYZE)); } diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index 1d13df3b2c32e..dac38447c78fe 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -38,16 +38,15 @@ task internalClusterTest(type: Test) { check.dependsOn internalClusterTest dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly(project(':modules:lang-painless')) { // exclude ASM to not affect featureAware task on Java 10+ exclude group: "org.ow2.asm" } compile project('sql-action') - compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" + compile project(':modules:aggs-matrix-stats') compile "org.antlr:antlr4-runtime:4.5.3" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(':test:framework') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 37e0baf00aa71..292b3ea890bf8 100644 --- a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -18,15 +18,15 @@ dependencies { compile (xpackProject('plugin:sql:sql-proto')) { transitive = false } - compile (project(':libs:x-content')) { + compile (project(':libs:elasticsearch-x-content')) { transitive = false } compile (project(':libs:elasticsearch-geo')) { transitive = false } - compile project(':libs:core') + compile project(':libs:elasticsearch-core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index f2a6acd61a058..14bc1faa3fa43 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -6,7 +6,7 @@ archivesBaseName = 'qa-sql' group = "org.elasticsearch.x-pack.qa.sql" dependencies { - compile "org.elasticsearch.test:framework:${version}" + compile project(":test:framework") // JDBC testing dependencies compile project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') @@ -59,7 +59,7 @@ subprojects { testCompile(xpackProject('plugin:sql:qa')) { transitive = false } - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") // JDBC testing dependencies testRuntime "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index a0e6e82ed4d67..33a4963c10376 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -1,5 +1,5 @@ dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':x-pack:plugin:core') } Project mainProject = project @@ -26,7 +26,7 @@ subprojects { } dependencies { - testCompile 
"org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(":x-pack:plugin:core") } integTestCluster { diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 936c7eef88191..3cb2c3f45d4aa 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -643,9 +643,9 @@ M |57 ; groupByAndAggExpression -// tag::groupByAndAggExpression schema::g:s|salary:i -SELECT gender AS g, ROUND( (MIN(salary) / 100) ) AS salary FROM emp GROUP BY gender; +// tag::groupByAndAggExpression +SELECT gender AS g, ROUND((MIN(salary) / 100)) AS salary FROM emp GROUP BY gender; g | salary ---------------+--------------- @@ -1001,12 +1001,13 @@ Frank Herbert |God Emperor of Dune|7.0029488 optionalParamsForMatch // tag::optionalParamsForMatch -SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=or;cutoff_frequency=0.2'); +SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=OR;fuzziness=AUTO:1,5;minimum_should_match=1') +ORDER BY SCORE() DESC LIMIT 2; author | name | SCORE() -----------------+------------------------------------+--------------- -Peter F. Hamilton|Pandora's Star |3.0997515 Douglas Adams |The Hitchhiker's Guide to the Galaxy|3.1756816 +Peter F. Hamilton|Pandora's Star |3.0997515 // end::optionalParamsForMatch ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec index cb410080e77bd..6379f6bf26f1a 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec @@ -92,14 +92,14 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_nam ; matchQueryWithOptions -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;cutoff_frequency=2;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10076 |Erez |F |Ritzmann ; matchQueryWithOptionsInMultipleCommaSeparatedStrings -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true;cutoff_frequency=2','fuzzy_rewrite=scoring_boolean;minimum_should_match=1','operator=AND', 'max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez', 'lenient=true','fuzzy_rewrite=scoring_boolean;minimum_should_match=1','operator=AND', 'max_expansions=30;prefix_length=1;analyzer=english;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10076 |Erez |F |Ritzmann @@ -113,14 +113,14 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_na ; multiMatchQueryAllOptions -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 
'slop=1;lenient=true;cutoff_frequency=2;tie_breaker=0.1;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true;tie_breaker=0.1;fuzzy_rewrite=scoring_boolean;minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10095 |Hilari |M |Morton ; multiMatchQueryWithInMultipleCommaSeparatedStrings -SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true', 'cutoff_frequency=2','tie_breaker=0.1;fuzzy_rewrite=scoring_boolean','minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); +SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH('first_name,last_name', 'Morton', 'slop=1;lenient=true', 'tie_breaker=0.1;fuzzy_rewrite=scoring_boolean','minimum_should_match=1;operator=AND;max_expansions=30;prefix_length=1;analyzer=english;type=best_fields;auto_generate_synonyms_phrase_query=true;fuzzy_transpositions=true'); emp_no:i | first_name:s | gender:s | last_name:s 10095 |Hilari |M |Morton diff --git a/x-pack/plugin/sql/sql-action/build.gradle b/x-pack/plugin/sql/sql-action/build.gradle index 86a028186f441..defbf3dac852e 100644 --- a/x-pack/plugin/sql/sql-action/build.gradle +++ b/x-pack/plugin/sql/sql-action/build.gradle @@ -13,10 +13,10 @@ dependencies { compile (project(':server')) { transitive = false } - compile (project(':libs:core')) { + compile (project(':libs:elasticsearch-core')) { transitive = false } - compile (project(':libs:x-content')) { + compile (project(':libs:elasticsearch-x-content')) { transitive = false } compile xpackProject('plugin:sql:sql-proto') @@ -26,7 +26,7 @@ dependencies { runtime "org.apache.logging.log4j:log4j-api:${versions.log4j}" runtime "org.apache.logging.log4j:log4j-core:${versions.log4j}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") } forbiddenApisMain { diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index 6f57ea279c5ab..927d165c2d268 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -25,10 +25,10 @@ dependencies { compile xpackProject('plugin:sql:sql-client') compile xpackProject('plugin:sql:sql-action') - compile "org.elasticsearch:elasticsearch-cli:${version}" + compile project(":libs:elasticsearch-cli") runtime "org.elasticsearch:jna:${versions.jna}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") } dependencyLicenses { diff --git a/x-pack/plugin/sql/sql-client/build.gradle b/x-pack/plugin/sql/sql-client/build.gradle index 613ca73a4dbad..cc6f097880e38 100644 --- a/x-pack/plugin/sql/sql-client/build.gradle +++ b/x-pack/plugin/sql/sql-client/build.gradle @@ -10,7 +10,7 @@ description = 'Code shared between jdbc and cli' dependencies { compile xpackProject('plugin:sql:sql-proto') compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile 
project(":test:framework") } dependencyLicenses { diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index b1c055a0dfcb8..af890d82968e3 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -8,15 +8,15 @@ description = 'Request and response objects shared by the cli, jdbc ' + 'and the Elasticsearch plugin' dependencies { - compile (project(':libs:core')) { + compile (project(':libs:elasticsearch-core')) { transitive = false } - compile (project(':libs:x-content')) { + compile (project(':libs:elasticsearch-x-content')) { transitive = false } runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testCompile "org.elasticsearch.test:framework:${version}" + testCompile project(":test:framework") } forbiddenApisMain { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java index 58ca09da929f9..3fca7630bd5a6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -32,7 +32,6 @@ public class MatchQuery extends LeafQuery { BUILDER_APPLIERS = Map.ofEntries( entry("analyzer", MatchQueryBuilder::analyzer), entry("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))), - entry("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))), entry("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.fromString(s))), entry("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))), entry("fuzzy_rewrite", MatchQueryBuilder::fuzzyRewrite), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java index f51f8275a898c..ab6190ad6eee1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -32,7 +32,6 @@ public class MultiMatchQuery extends LeafQuery { // appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); entry("analyzer", MultiMatchQueryBuilder::analyzer), entry("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))), - entry("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))), entry("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.fromString(s))), entry("fuzzy_rewrite", MultiMatchQueryBuilder::fuzzyRewrite), entry("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))), diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json index 4e5550ae824a9..2f65a5d974911 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.find_file_structure.json @@ -11,6 +11,11 @@ "description": "How many lines of the file should be included in the analysis", "default": 1000 }, + "line_merge_size_limit": { + "type": "int", + "description": "Maximum number of characters permitted in a single message when lines are merged to create 
messages.", + "default": 10000 + }, "timeout": { "type": "time", "description": "Timeout after which the analysis will be aborted", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 31f80033e7bdb..d156344b5ad6f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -90,9 +90,6 @@ teardown: - match: { airline-data-by-airline-start-stop.mappings: {} } --- "Test start/stop/start transform": - - skip: - reason: "https://github.com/elastic/elasticsearch/issues/42650" - version: "all" - do: data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" @@ -190,8 +187,10 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" + wait_for_completion: true - match: { acknowledged: true } + - do: data_frame.get_data_frame_transform_stats: transform_id: "airline-transform-start-later" @@ -209,3 +208,46 @@ teardown: - do: data_frame.delete_data_frame_transform: transform_id: "airline-transform-start-later" + +--- +"Test stop all": + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform-stop-all" + body: > + { + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-start-later" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-stop-all" + - match: { acknowledged: true } + + - do: + data_frame.start_data_frame_transform: + transform_id: "airline-transform-start-stop" + - match: { acknowledged: true } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "_all" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "*" + - match: { count: 2 } + - match: { transforms.0.state.indexer_state: "stopped" } + - match: { transforms.0.state.task_state: "stopped" } + - match: { transforms.1.state.indexer_state: "stopped" } + - match: { transforms.1.state.task_state: "stopped" } + + - do: + data_frame.delete_data_frame_transform: + transform_id: "airline-transform-stop-all" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml index 7c6aff66e3df2..a9634605aaac2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/find_file_structure.yml @@ -10,6 +10,7 @@ setup: Content-Type: "application/json" ml.find_file_structure: lines_to_sample: 3 + line_merge_size_limit: 1234 timeout: 10s body: - airline: AAL diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml index 371f7c7207fa3..3ff1c1bb6b4d7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml @@ -59,9 +59,10 @@ setup: - is_true: started - do: - catch: /Cannot start task for Rollup Job \[foo\] because state was/ headers: Authorization: "Basic 
eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser rollup.start_job: id: foo + - is_true: started + diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index 1585488bdb5b3..20695d805190b 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -23,9 +23,7 @@ dependencyLicenses { } dependencies { - compileOnly "org.elasticsearch:elasticsearch:${version}" - - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here + compileOnly project(':server') compileOnly project(path: xpackModule('core'), configuration: 'default') compileOnly project(path: ':modules:transport-netty4', configuration: 'runtime') compileOnly project(path: ':plugins:transport-nio', configuration: 'runtime') @@ -34,7 +32,7 @@ dependencies { if (isEclipse) { testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') } - testCompile "org.elasticsearch.plugin:x-pack-ilm:${version}" + testCompile project(xpackModule('ilm')) // watcher deps compile 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:r239' diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 5b11b444db3ca..5199aa6bf22f2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -41,7 +41,6 @@ import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; @@ -328,7 +327,7 @@ protected WatchParser watchParser() { } protected WatcherClient watcherClient() { - return randomBoolean() ? 
new XPackClient(client()).watcher() : new WatcherClient(client()); + return new WatcherClient(client()); } private IndexNameExpressionResolver indexNameExpressionResolver() { diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index e4c261b4c5d57..4313aad9e4295 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -12,7 +12,7 @@ testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" integTest.enabled = false dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':x-pack:plugin:core') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { testCompile project(path: xpackModule('core-tests'), configuration: 'testArtifacts') diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle index 34325bc69b624..f4034764a49e2 100644 --- a/x-pack/qa/oidc-op-tests/build.gradle +++ b/x-pack/qa/oidc-op-tests/build.gradle @@ -5,7 +5,6 @@ apply plugin: 'elasticsearch.rest-test' apply plugin: 'elasticsearch.test.fixtures' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') if (isEclipse) { diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index f732d8fc5b030..0f1d728e9b8aa 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -2,7 +2,6 @@ apply plugin: 'elasticsearch.standalone-test' apply plugin: 'elasticsearch.test.fixtures' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 7f878e6356b73..48384aed17a84 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -4,7 +4,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index d75ecbd7a55ed..f58ee9ed1569e 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -11,7 +11,7 @@ test.enabled = false dependencies { testCompile project(':x-pack:qa') - testCompile ("org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}") + testCompile project(':client:rest-high-level') } Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java 
b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 4a0639050d522..69c515d80a3d2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -7,7 +7,6 @@ import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -26,7 +25,6 @@ import java.util.List; import java.util.Map; -@AwaitsFix(bugUrl = "need to backport #42651") public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { private Collection twoClients = null; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml index a34128579f3f8..f426d9b2525b4 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/50_token_auth.yml @@ -2,8 +2,6 @@ "Get the indexed token and use if to authenticate": - skip: features: headers - version: " - 7.99.99" - reason: "Need to backport PR #42651" - do: cluster.health: @@ -61,8 +59,6 @@ "Get the indexed refreshed access token and use if to authenticate": - skip: features: headers - version: " - 7.99.99" - reason: "Need to backport PR #42651" - do: get: @@ -115,8 +111,6 @@ "Get the indexed refresh token and use it to get another access token and authenticate": - skip: features: headers - version: " - 7.99.99" - reason: "Need to backport PR #42651" - do: get: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml index 64897707c15d3..430f94c1064d6 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/50_token_auth.yml @@ -2,8 +2,6 @@ "Get the indexed token and use if to authenticate": - skip: features: headers - version: " - 8.0.0" - reason: "Need to backport PR #42651" - do: cluster.health: @@ -51,8 +49,6 @@ "Get the indexed refresh token and use if to get another access token and authenticate": - skip: features: headers - version: " - 8.0.0" - reason: "Need to backport PR #42651" - do: get: diff --git a/x-pack/qa/security-example-spi-extension/build.gradle b/x-pack/qa/security-example-spi-extension/build.gradle index 4790df3609c35..3796b55976ab8 100644 --- a/x-pack/qa/security-example-spi-extension/build.gradle +++ b/x-pack/qa/security-example-spi-extension/build.gradle @@ -8,8 +8,8 @@ esplugin { } dependencies { - compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" + compileOnly project(':x-pack:plugin:core') + testCompile project(':client:rest-high-level') } diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle index 604fffbe32f76..61b25d4ae8d7f 100644 --- a/x-pack/qa/security-setup-password-tests/build.gradle +++ 
b/x-pack/qa/security-setup-password-tests/build.gradle @@ -2,7 +2,6 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index d4fe2129363c5..2f850636037d5 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -6,7 +6,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':x-pack:plugin:core') testCompile project(':client:rest-high-level') } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index e184ef19596af..d2f688889a95f 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -164,7 +164,7 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { String indexName = "index_not_allowed_to_read"; try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); - builder.startObject("trigger").startObject("schedule").field("interval", "1s").endObject().endObject(); + builder.startObject("trigger").startObject("schedule").field("interval", "4s").endObject().endObject(); builder.startObject("input").startObject("search").startObject("request") .startArray("indices").value(indexName).endArray() .startObject("body").startObject("query").startObject("match_all").endObject().endObject().endObject() @@ -180,8 +180,10 @@ public void testSearchInputWithInsufficientPrivileges() throws Exception { // check history, after watch has fired ObjectPath objectPath = getWatchHistoryEntry(watchId); - String state = objectPath.evaluate("hits.hits.0._source.state"); - assertThat(state, is("execution_not_needed")); + assertBusy(() -> { + String state = objectPath.evaluate("hits.hits.0._source.state"); + assertThat(state, is("execution_not_needed")); + }); boolean conditionMet = objectPath.evaluate("hits.hits.0._source.result.condition.met"); assertThat(conditionMet, is(false)); } diff --git a/x-pack/qa/third-party/jira/build.gradle b/x-pack/qa/third-party/jira/build.gradle index 43667300a3383..c01f6f129b9b1 100644 --- a/x-pack/qa/third-party/jira/build.gradle +++ b/x-pack/qa/third-party/jira/build.gradle @@ -7,7 +7,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':x-pack:plugin:core') testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/pagerduty/build.gradle b/x-pack/qa/third-party/pagerduty/build.gradle index 9013d8c281538..69c9848447044 100644 --- 
a/x-pack/qa/third-party/pagerduty/build.gradle +++ b/x-pack/qa/third-party/pagerduty/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':x-pack:plugin:core') testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/qa/third-party/slack/build.gradle b/x-pack/qa/third-party/slack/build.gradle index 9fdfaeb826667..956631714c040 100644 --- a/x-pack/qa/third-party/slack/build.gradle +++ b/x-pack/qa/third-party/slack/build.gradle @@ -2,7 +2,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' dependencies { - testCompile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile project(':x-pack:plugin:core') testCompile project(path: xpackModule('watcher'), configuration: 'runtime') } diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle index e3c7ae96063d0..9d7f1504418d2 100644 --- a/x-pack/test/feature-aware/build.gradle +++ b/x-pack/test/feature-aware/build.gradle @@ -2,9 +2,9 @@ apply plugin: 'elasticsearch.build' dependencies { compile 'org.ow2.asm:asm:7.1' - compile "org.elasticsearch:elasticsearch:${version}" - compile "org.elasticsearch.plugin:x-pack-core:${version}" - testCompile "org.elasticsearch.test:framework:${version}" + compile project(':server') + compile project(':x-pack:plugin:core') + testCompile project(':test:framework') } forbiddenApisMain.enabled = true
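
RestMonitoringBulkAction now extends BaseRestHandler and builds its MonitoringBulkRequestBuilder directly from the NodeClient, and the response rendering moves into the package-private static getRestBuilderListener(RestChannel) factory so unit tests can call buildResponse without stubbing the removed XPackClient and MonitoringClient plumbing. A minimal sketch of the resulting test-side flow; it assumes the Mockito and test helpers already imported in RestMonitoringBulkActionTests (mock, when, JsonXContent, randomLong, assertThat, is, containsString), and the method name is illustrative rather than a verbatim excerpt from the patch:

    // Sketch: build the listener from a mocked channel and render a response directly.
    public void testNoErrorsSketch() throws Exception {
        final RestChannel channel = mock(RestChannel.class);
        when(channel.newBuilder()).thenReturn(JsonXContent.contentBuilder());

        // Package-private factory added in RestMonitoringBulkAction.
        final RestBuilderListener<MonitoringBulkResponse> listener =
                RestMonitoringBulkAction.getRestBuilderListener(channel);

        final MonitoringBulkResponse response = new MonitoringBulkResponse(randomLong(), false);
        final RestResponse restResponse = listener.buildResponse(response);

        assertThat(restResponse.status(), is(RestStatus.OK));
        assertThat(restResponse.content().utf8ToString(), containsString("\"errors\":false"));
    }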
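JobStatsCollectorTests follows the same direction: it drops the MachineLearningClient wrapper and stubs the generic Client#execute(action, request) entry point instead. The stubbing reduces to the lines below; the surrounding variables (jobStats, timeout, future, response) are set up exactly as in the existing test and are assumed here:

    // Stub the generic execute(...) call rather than a dedicated ML client method.
    final Client client = mock(Client.class);
    when(client.execute(eq(GetJobsStatsAction.INSTANCE), eq(new Request(MetaData.ALL)))).thenReturn(future);
    when(future.actionGet(timeout)).thenReturn(response);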
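RollupJobTask#start is now idempotent for a running job: STARTED or INDEXING acknowledges immediately, STOPPED goes through the normal start-and-persist-state path, and any other indexer state is rejected. That is why testStartWhenStarted and the rollup start_job.yml test now expect success rather than an error. A self-contained sketch of just that branching; the names below are illustrative and not the actual Rollup classes:

    // Illustrative condensation of the branching added to RollupJobTask#start.
    enum IndexerState { STOPPED, STARTED, INDEXING, STOPPING, ABORTING }

    static String decideStart(IndexerState prevState) {
        if (prevState == IndexerState.STARTED || prevState == IndexerState.INDEXING) {
            // Already running: acknowledge without touching the persistent task state.
            return "acknowledged (already started)";
        } else if (prevState != IndexerState.STOPPED) {
            // e.g. STOPPING or ABORTING: refuse to start.
            return "error: state was [" + prevState + "]";
        }
        // STOPPED: start the indexer and persist the STARTED status.
        return "started";
    }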