diff --git a/build.gradle b/build.gradle
index 862943b50f5eb..a29c783422172 100644
--- a/build.gradle
+++ b/build.gradle
@@ -486,25 +486,17 @@ task run(type: Run) {
   impliesSubProjects = true
 }
 
-task wrapper(type: Wrapper)
-
-gradle.projectsEvaluated {
-
-  allprojects {
-    tasks.withType(Wrapper) { Wrapper wrapper ->
-      wrapper.distributionType = DistributionType.ALL
-
-      wrapper.doLast {
+wrapper {
+    distributionType = DistributionType.ALL
+    doLast {
         final DistributionLocator locator = new DistributionLocator()
         final GradleVersion version = GradleVersion.version(wrapper.gradleVersion)
         final URI distributionUri = locator.getDistributionFor(version, wrapper.distributionType.name().toLowerCase(Locale.ENGLISH))
         final URI sha256Uri = new URI(distributionUri.toString() + ".sha256")
         final String sha256Sum = new String(sha256Uri.toURL().bytes)
         wrapper.getPropertiesFile() << "distributionSha256Sum=${sha256Sum}\n"
-      }
+        println "Added checksum to wrapper properties"
     }
-  }
-}
 
 static void assertLinesInFile(final Path path, final List<String> expectedLines) {
@@ -591,7 +583,7 @@ if (System.properties.get("build.compare") != null) {
     }
   }
   sourceBuild {
-    gradleVersion = "4.7" // does not default to gradle weapper of project dir, but current version
+    gradleVersion = "4.8.1" // does not default to the Gradle wrapper of the project dir, but to the current version
     projectDir = referenceProject
     tasks = ["clean", "assemble"]
     arguments = ["-Dbuild.compare_friendly=true"]
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 9ae86a661cea2..4e31de08829cc 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -16,8 +16,6 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-
 import java.nio.file.Files
 
 plugins {
@@ -41,6 +39,12 @@ if (project == rootProject) {
   buildDir = 'build-bootstrap'
 }
 
+// Make sure :buildSrc: doesn't generate classes incompatible with RUNTIME_JAVA_HOME
+// We can't use BuildPlugin here, so read from file
+String minimumRuntimeVersion = file('src/main/resources/minimumRuntimeVersion').text.trim()
+targetCompatibility = minimumRuntimeVersion
+sourceCompatibility = minimumRuntimeVersion
+
 /*****************************************************************************
  *         Propagating version.properties to the rest of the build          *
 *****************************************************************************/
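Both halves of the build now share one source of truth: buildSrc reads src/main/resources/minimumRuntimeVersion from disk above, while BuildPlugin below loads the same files as classpath resources. A minimal Java sketch of that resource lookup (hypothetical class name; standard JDK APIs only):

import java.io.InputStream;
import java.util.Scanner;

class MinimumVersions {
    static String read(String resource) {
        // the version files sit in buildSrc/src/main/resources, so they are plain classpath resources
        InputStream in = MinimumVersions.class.getClassLoader().getResourceAsStream(resource);
        try (Scanner scanner = new Scanner(in, "UTF-8")) {
            return scanner.useDelimiter("\\A").next().trim(); // whole stream, trailing newline dropped
        }
    }

    public static void main(String[] args) {
        System.out.println(read("minimumRuntimeVersion"));  // "1.8" per the new resource file
        System.out.println(read("minimumCompilerVersion")); // "1.10" per the new resource file
    }
}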
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 8a2b1b798e163..04fcbe0776b1a 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -19,7 +19,6 @@
 package org.elasticsearch.gradle
 
 import com.carrotsearch.gradle.junit4.RandomizedTestingTask
-import nebula.plugin.extraconfigurations.ProvidedBasePlugin
 import org.apache.tools.ant.taskdefs.condition.Os
 import org.eclipse.jgit.lib.Constants
 import org.eclipse.jgit.lib.RepositoryBuilder
@@ -58,9 +57,6 @@ import java.time.ZonedDateTime
  */
 class BuildPlugin implements Plugin<Project> {
 
-    static final JavaVersion minimumRuntimeVersion = JavaVersion.VERSION_1_8
-    static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_10
-
     @Override
     void apply(Project project) {
         if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) {
@@ -95,6 +91,12 @@ class BuildPlugin implements Plugin<Project> {
     /** Performs checks on the build environment and prints information about the build environment. */
     static void globalBuildInfo(Project project) {
         if (project.rootProject.ext.has('buildChecksDone') == false) {
+            JavaVersion minimumRuntimeVersion = JavaVersion.toVersion(
+                    BuildPlugin.class.getClassLoader().getResourceAsStream("minimumRuntimeVersion").text.trim()
+            )
+            JavaVersion minimumCompilerVersion = JavaVersion.toVersion(
+                    BuildPlugin.class.getClassLoader().getResourceAsStream("minimumCompilerVersion").text.trim()
+            )
             String compilerJavaHome = findCompilerJavaHome()
             String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome)
             File gradleJavaHome = Jvm.current().javaHome
@@ -192,10 +194,12 @@ class BuildPlugin implements Plugin<Project> {
             project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum
             project.rootProject.ext.javaVersions = javaVersions
             project.rootProject.ext.buildChecksDone = true
+            project.rootProject.ext.minimumCompilerVersion = minimumCompilerVersion
+            project.rootProject.ext.minimumRuntimeVersion = minimumRuntimeVersion
         }
 
-        project.targetCompatibility = minimumRuntimeVersion
-        project.sourceCompatibility = minimumRuntimeVersion
+        project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion
+        project.sourceCompatibility = project.rootProject.ext.minimumRuntimeVersion
 
         // set java home for each project, so they dont have to find it in the root project
         project.ext.compilerJavaHome = project.rootProject.ext.compilerJavaHome
@@ -348,7 +352,7 @@ class BuildPlugin implements Plugin<Project> {
                 // just a self contained test-fixture configuration, likely transitive and hellacious
                 return
             }
-            configuration.resolutionStrategy { 
+            configuration.resolutionStrategy {
                 failOnVersionConflict()
             }
         })
@@ -467,6 +471,24 @@ class BuildPlugin implements Plugin<Project> {
 
     /**Configuration generation of maven poms. */
     public static void configurePomGeneration(Project project) {
+        // Only works with `enableFeaturePreview('STABLE_PUBLISHING')`
+        // https://github.com/gradle/gradle/issues/5696#issuecomment-396965185
+        project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask ->
+            // The GenerateMavenPom task is aggressive about setting the destination;
+            // instead of fighting it, just make a copy.
+ doLast { + project.copy { + from generatePOMTask.destination + into "${project.buildDir}/distributions" + rename { "${project.archivesBaseName}-${project.version}.pom" } + } + } + // build poms with assemble (if the assemble task exists) + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + assemble.dependsOn(generatePOMTask) + } + } project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded { project.publishing { publications { @@ -476,20 +498,6 @@ class BuildPlugin implements Plugin { } } } - - // Work around Gradle 4.8 issue until we `enableFeaturePreview('STABLE_PUBLISHING')` - // https://github.com/gradle/gradle/issues/5696#issuecomment-396965185 - project.getGradle().getTaskGraph().whenReady { - project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom t -> - // place the pom next to the jar it is for - t.destination = new File(project.buildDir, "distributions/${project.archivesBaseName}-${project.version}.pom") - // build poms with assemble (if the assemble task exists) - Task assemble = project.tasks.findByName('assemble') - if (assemble) { - assemble.dependsOn(t) - } - } - } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 0a60d6ef87a44..eb4da8d1f314c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -159,16 +159,18 @@ public class PluginBuildPlugin extends BuildPlugin { /** Adds a task to move jar and associated files to a "-client" name. */ protected static void addClientJarTask(Project project) { Task clientJar = project.tasks.create('clientJar') - clientJar.dependsOn(project.jar, 'generatePomFileForClientJarPublication', project.javadocJar, project.sourcesJar) + clientJar.dependsOn(project.jar, project.tasks.generatePomFileForClientJarPublication, project.javadocJar, project.sourcesJar) clientJar.doFirst { Path jarFile = project.jar.outputs.files.singleFile.toPath() String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}") Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING) - String pomFileName = jarFile.fileName.toString().replace('.jar', '.pom') String clientPomFileName = clientFileName.replace('.jar', '.pom') - Files.copy(jarFile.resolveSibling(pomFileName), jarFile.resolveSibling(clientPomFileName), - StandardCopyOption.REPLACE_EXISTING) + Files.copy( + project.tasks.generatePomFileForClientJarPublication.outputs.files.singleFile.toPath(), + jarFile.resolveSibling(clientPomFileName), + StandardCopyOption.REPLACE_EXISTING + ) String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar') String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar') diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy index c48dc890ab080..390821c80ff39 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneRestTestPlugin.groovy @@ -29,6 +29,7 @@ import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.plugins.JavaBasePlugin +import 
org.gradle.api.tasks.compile.JavaCompile /** * Configures the build to compile tests against Elasticsearch's test framework @@ -61,5 +62,12 @@ public class StandaloneRestTestPlugin implements Plugin { PrecommitTasks.create(project, false) project.check.dependsOn(project.precommit) + + project.tasks.withType(JavaCompile) { + // This will be the default in Gradle 5.0 + if (options.compilerArgs.contains("-processor") == false) { + options.compilerArgs << '-proc:none' + } + } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index 3e1f62f96e6bd..e38163d616661 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -50,12 +50,5 @@ public class StandaloneTestPlugin implements Plugin { test.testClassesDirs = project.sourceSets.test.output.classesDirs test.mustRunAfter(project.precommit) project.check.dependsOn(test) - - project.tasks.withType(JavaCompile) { - // This will be the default in Gradle 5.0 - if (options.compilerArgs.contains("-processor") == false) { - options.compilerArgs << '-proc:none' - } - } } } diff --git a/buildSrc/src/main/resources/minimumCompilerVersion b/buildSrc/src/main/resources/minimumCompilerVersion new file mode 100644 index 0000000000000..578c71bd55827 --- /dev/null +++ b/buildSrc/src/main/resources/minimumCompilerVersion @@ -0,0 +1 @@ +1.10 \ No newline at end of file diff --git a/buildSrc/src/main/resources/minimumRuntimeVersion b/buildSrc/src/main/resources/minimumRuntimeVersion new file mode 100644 index 0000000000000..468437494697b --- /dev/null +++ b/buildSrc/src/main/resources/minimumRuntimeVersion @@ -0,0 +1 @@ +1.8 \ No newline at end of file diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java index 48a62f8900fae..c3262ee1e26e6 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java @@ -21,6 +21,7 @@ import com.carrotsearch.randomizedtesting.JUnit4MethodProvider; import com.carrotsearch.randomizedtesting.RandomizedRunner; import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import org.junit.Assert; import org.junit.runner.RunWith; @@ -29,5 +30,6 @@ JUnit4MethodProvider.class, JUnit3MethodProvider.class }) +@ThreadLeakLingering(linger = 5000) // wait for "Connection worker" to die public abstract class BaseTestCase extends Assert { } diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index 9ca53292a4956..77867f5e273f2 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -24,7 +24,7 @@ buildscript { } } dependencies { - classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.2' + classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4' } } @@ -46,9 +46,7 @@ mainClassName = 'org.elasticsearch.client.benchmark.BenchmarkMain' // never try to invoke tests on the benchmark project - there aren't any -check.dependsOn.remove(test) -// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip -task test(type: Test, overwrite: true) +test.enabled = false dependencies { compile 
'org.apache.commons:commons-math3:3.2'

diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
index 00b275f701c6d..934b952608674 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
@@ -794,8 +794,10 @@ static URI buildUri(String pathPrefix, String path, Map<String, String> params)
         Objects.requireNonNull(path, "path must not be null");
         try {
             String fullPath;
-            if (pathPrefix != null) {
-                if (path.startsWith("/")) {
+            if (pathPrefix != null && pathPrefix.isEmpty() == false) {
+                if (pathPrefix.endsWith("/") && path.startsWith("/")) {
+                    fullPath = pathPrefix.substring(0, pathPrefix.length() - 1) + path;
+                } else if (pathPrefix.endsWith("/") || path.startsWith("/")) {
                     fullPath = pathPrefix + path;
                 } else {
                     fullPath = pathPrefix + "/" + path;
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
index fb61f4f17c483..dd3f5ad5a7274 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java
@@ -143,22 +143,26 @@ public RestClientBuilder setRequestConfigCallback(RequestConfigCallback requestC
      * For example, if this is set to "/my/path", then any client request will become "/my/path/" + endpoint.
      *

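     * <p>
     * For illustration (not part of this change): with the prefix set to "/my/path", a low-level call like
     * {@code performRequest("GET", "/_cluster/health")} goes out on the wire as {@code GET /my/path/_cluster/health}.
     * <p>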
* In essence, every request's {@code endpoint} is prefixed by this {@code pathPrefix}. The path prefix is useful for when - * Elasticsearch is behind a proxy that provides a base path; it is not intended for other purposes and it should not be supplied in - * other scenarios. + * Elasticsearch is behind a proxy that provides a base path or a proxy that requires all paths to start with '/'; + * it is not intended for other purposes and it should not be supplied in other scenarios. * * @throws NullPointerException if {@code pathPrefix} is {@code null}. - * @throws IllegalArgumentException if {@code pathPrefix} is empty, only '/', or ends with more than one '/'. + * @throws IllegalArgumentException if {@code pathPrefix} is empty, or ends with more than one '/'. */ public RestClientBuilder setPathPrefix(String pathPrefix) { Objects.requireNonNull(pathPrefix, "pathPrefix must not be null"); - String cleanPathPrefix = pathPrefix; + if (pathPrefix.isEmpty()) { + throw new IllegalArgumentException("pathPrefix must not be empty"); + } + + String cleanPathPrefix = pathPrefix; if (cleanPathPrefix.startsWith("/") == false) { cleanPathPrefix = "/" + cleanPathPrefix; } // best effort to ensure that it looks like "/base/path" rather than "/base/path/" - if (cleanPathPrefix.endsWith("/")) { + if (cleanPathPrefix.endsWith("/") && cleanPathPrefix.length() > 1) { cleanPathPrefix = cleanPathPrefix.substring(0, cleanPathPrefix.length() - 1); if (cleanPathPrefix.endsWith("/")) { @@ -166,9 +170,6 @@ public RestClientBuilder setPathPrefix(String pathPrefix) { } } - if (cleanPathPrefix.isEmpty() || "/".equals(cleanPathPrefix)) { - throw new IllegalArgumentException("pathPrefix must not be empty or '/': [" + pathPrefix + "]"); - } this.pathPrefix = cleanPathPrefix; return this; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 9fcb4978e28a7..834748d65de34 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -180,7 +180,6 @@ public void testSetPathPrefixNull() { } public void testSetPathPrefixEmpty() { - assertSetPathPrefixThrows("/"); assertSetPathPrefixThrows(""); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 271fc51ef8835..ef94b70542f6c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -223,12 +223,33 @@ public void onFailure(Exception exception) { } public void testBuildUriLeavesPathUntouched() { + final Map emptyMap = Collections.emptyMap(); { - URI uri = RestClient.buildUri("/foo$bar", "/index/type/id", Collections.emptyMap()); + URI uri = RestClient.buildUri("/foo$bar", "/index/type/id", emptyMap); assertEquals("/foo$bar/index/type/id", uri.getPath()); } { - URI uri = RestClient.buildUri(null, "/foo$bar/ty/pe/i/d", Collections.emptyMap()); + URI uri = RestClient.buildUri("/", "/*", emptyMap); + assertEquals("/*", uri.getPath()); + } + { + URI uri = RestClient.buildUri("/", "*", emptyMap); + assertEquals("/*", uri.getPath()); + } + { + URI uri = RestClient.buildUri(null, "*", emptyMap); + assertEquals("*", uri.getPath()); + } + { + URI uri = RestClient.buildUri("", "*", emptyMap); + assertEquals("*", uri.getPath()); + } + { + 
URI uri = RestClient.buildUri(null, "/*", emptyMap); + assertEquals("/*", uri.getPath()); + } + { + URI uri = RestClient.buildUri(null, "/foo$bar/ty/pe/i/d", emptyMap); assertEquals("/foo$bar/ty/pe/i/d", uri.getPath()); } { diff --git a/docs/plugins/api.asciidoc b/docs/plugins/api.asciidoc index 4dffb9608821f..74fbba25810d8 100644 --- a/docs/plugins/api.asciidoc +++ b/docs/plugins/api.asciidoc @@ -26,6 +26,9 @@ A number of plugins have been contributed by our community: * https://github.com/NLPchina/elasticsearch-sql/[SQL language Plugin]: Allows Elasticsearch to be queried with SQL (by nlpcn) +* https://github.com/ritesh-kapoor/elasticsearch-pql[PQL language Plugin]: + Allows Elasticsearch to be queried with simple pipeline query syntax. + * https://github.com/codelibs/elasticsearch-taste[Elasticsearch Taste Plugin]: Mahout Taste-based Collaborative Filtering implementation (by CodeLibs Project) diff --git a/docs/reference/licensing/get-trial-status.asciidoc b/docs/reference/licensing/get-trial-status.asciidoc index ec47782a3d2ee..2590c50f4b5ca 100644 --- a/docs/reference/licensing/get-trial-status.asciidoc +++ b/docs/reference/licensing/get-trial-status.asciidoc @@ -13,12 +13,11 @@ This API enables you to check the status of your trial license. [float] ==== Description -If a license is not already registered for the cluster, one is generated when -the nodes start. By default, this is a 30-day trial license that gives access -to all {xpack} features. +If you want to try the features that are included in a platinum license, you can +start a 30-day trial. NOTE: You are allowed to initiate a trial license only if your cluster has not -already activated a trial license for the current major X-Pack version. For +already activated a trial license for the current major product version. For example, if you have already activated a trial for v6.0, you cannot start a new trial until v7.0. You can, however, contact `info@elastic.co` to request an extended trial license. diff --git a/docs/reference/licensing/start-basic.asciidoc b/docs/reference/licensing/start-basic.asciidoc index 3206dc0801f36..ffb9bb9a04f49 100644 --- a/docs/reference/licensing/start-basic.asciidoc +++ b/docs/reference/licensing/start-basic.asciidoc @@ -14,7 +14,7 @@ This API starts an indefinite basic license. ==== Description The `start basic` API enables you to initiate an indefinite basic license, which -gives access to all {xpack} basic features. If the basic license does not support +gives access to all the basic features. If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the `acknowledge` parameter set to `true`. diff --git a/docs/reference/licensing/start-trial.asciidoc b/docs/reference/licensing/start-trial.asciidoc index ba1cc0d786693..6b7598e96e461 100644 --- a/docs/reference/licensing/start-trial.asciidoc +++ b/docs/reference/licensing/start-trial.asciidoc @@ -14,10 +14,10 @@ This API starts a 30-day trial license. ==== Description The `start trial` API enables you to upgrade from a basic license to a 30-day -trial license, which gives access to all {xpack} features. +trial license, which gives access to the platinum features. NOTE: You are allowed to initiate a trial license only if your cluster has not -already activated a trial license for the current major X-Pack version. For +already activated a trial license for the current major product version. 
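A hypothetical invocation through the low-level REST client that this repository ships (host, port, and the 6.x `_xpack/license/start_trial` endpoint are assumptions, not part of this change):

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class StartTrial {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // acknowledge=true confirms you have reviewed what happens when the trial expires
            Response response = client.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true");
            System.out.println(response.getStatusLine());
        }
    }
}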
For example, if you have already activated a trial for v6.0, you cannot start a new trial until v7.0. You can, however, contact `info@elastic.co` to request an extended trial license. diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index a98bca676152d..ba2fc27ed4263 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -328,7 +328,7 @@ role mappings are not considered. Defaults to `false`. `files.role_mapping`:: The {xpack-ref}/security-files.html[location] for the {xpack-ref}/mapping-roles.html#mapping-roles[ YAML role mapping configuration file]. Defaults to -`CONFIG_DIR/role_mapping.yml`. +`ES_PATH_CONF/role_mapping.yml`. `follow_referrals`:: Specifies whether {security} should follow referrals returned @@ -494,7 +494,7 @@ considered. Defaults to `false`. `files.role_mapping`:: The {xpack-ref}/security-files.html[location] for the YAML -role mapping configuration file. Defaults to `CONFIG_DIR/role_mapping.yml`. +role mapping configuration file. Defaults to `ES_PATH_CONF/role_mapping.yml`. `user_search.base_dn`:: The context to search for a user. Defaults to the root @@ -719,7 +719,7 @@ for SSL. This setting cannot be used with `certificate_authorities`. `files.role_mapping`:: Specifies the {xpack-ref}/security-files.html[location] of the {xpack-ref}/mapping-roles.html[YAML role mapping configuration file]. -Defaults to `CONFIG_DIR/role_mapping.yml`. +Defaults to `ES_PATH_CONF/role_mapping.yml`. `cache.ttl`:: Specifies the time-to-live for cached user entries. A user and a hash of its diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index 78731f7663d11..1474ba137697d 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -1,5 +1,5 @@ [[secure-settings]] -=== Secure Settings +=== Secure settings Some settings are sensitive, and relying on filesystem permissions to protect their values is not sufficient. For this use case, Elasticsearch provides a @@ -16,6 +16,10 @@ Elasticsearch. NOTE: The elasticsearch keystore currently only provides obfuscation. In the future, password protection will be added. +These settings, just like the regular ones in the `elasticsearch.yml` config file, +need to be specified on each node in the cluster. Currently, all secure settings +are node-specific settings that must have the same value on every node. + [float] [[creating-keystore]] === Creating the keystore diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc index f5634a2558ea5..dfc7371dd3756 100644 --- a/docs/reference/testing/testing-framework.asciidoc +++ b/docs/reference/testing/testing-framework.asciidoc @@ -54,7 +54,7 @@ If your test is a well isolated unit test which doesn't need a running Elasticse [[integration-tests]] -=== integration tests +=== Integration tests These kind of tests require firing up a whole cluster of nodes, before the tests can actually be run. Compared to unit tests they are obviously way more time consuming, but the test infrastructure tries to minimize the time cost by only restarting the whole cluster, if this is configured explicitly. @@ -62,14 +62,14 @@ The class your tests have to inherit from is `ESIntegTestCase`. 
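A minimal sketch of such a test (names are illustrative; it assumes the test framework dependency is set up as described above):

import org.elasticsearch.test.ESIntegTestCase;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

public class MyFeatureIT extends ESIntegTestCase {
    public void testIndexAndSearch() {
        createIndex("test");                         // helper from ESIntegTestCase
        index("test", "doc", "1", "field", "value"); // index one document
        refresh();                                   // make it visible to search
        assertHitCount(client().prepareSearch("test").get(), 1L);
    }
}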
By inheriting fr

 [[number-of-shards]]
-==== number of shards
+==== Number of shards
 
 The number of shards used for indices created during integration tests is randomized between `1` and `10` unless overwritten upon index creation via index settings. The rule of thumb is not to specify the number of shards unless needed, so that each test will use a different one all the time. Alternatively you can override the `numberOfShards()` method. The same applies to the `numberOfReplicas()` method.
 
 [[helper-methods]]
-==== generic helper methods
+==== Generic helper methods
 
 There are a couple of helper methods in `ESIntegTestCase`, which will make your tests shorter and more concise.
@@ -88,7 +88,7 @@ There are a couple of helper methods in `ESIntegTestCase`, which will make your
 
 [[test-cluster-methods]]
-==== test cluster methods
+==== Test cluster methods
 
 The `InternalTestCluster` class is the heart of the cluster functionality in a randomized test and allows you to configure a specific setting or replay certain types of outages to check, how your custom code reacts.
 
diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
index 02388d838bc2a..6c68710c6d8bd 100644
--- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
+++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java
@@ -52,7 +52,7 @@ public final class Grok {
             "%\\{" +
             "(?<name>" +
             "(?<pattern>[A-z0-9]+)" +
-            "(?::(?<subname>[A-z0-9_:.-]+))?" +
+            "(?::(?<subname>[[:alnum:]@\\[\\]_:.-]+))?" +
             ")" +
             "(?:=(?<definition>" +
             "(?:" +
@@ -81,11 +81,11 @@ public final class Grok {
     public Grok(Map<String, String> patternBank, String grokPattern) {
         this(patternBank, grokPattern, true, ThreadWatchdog.noop());
     }
-    
+
     public Grok(Map<String, String> patternBank, String grokPattern, ThreadWatchdog threadWatchdog) {
         this(patternBank, grokPattern, true, threadWatchdog);
     }
-    
+
     Grok(Map<String, String> patternBank, String grokPattern, boolean namedCaptures) {
         this(patternBank, grokPattern, namedCaptures, ThreadWatchdog.noop());
     }
diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java
index 8d79aa290ebff..d30cf3d6fa21b 100644
--- a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java
+++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java
@@ -412,10 +412,10 @@ public void testMultipleNamedCapturesWithSameName() {
         expected.put("num", "1");
         assertThat(grok.captures("12"), equalTo(expected));
     }
-    
+
     public void testExponentialExpressions() {
         AtomicBoolean run = new AtomicBoolean(true); // to avoid a lingering thread when test has completed
-    
+
         String grokPattern = "Bonsuche mit folgender Anfrage: Belegart->\\[%{WORD:param2},(?<param5>(\\s*%{NOTSPACE})*)\\] " +
             "Zustand->ABGESCHLOSSEN Kassennummer->%{WORD:param9} Bonnummer->%{WORD:param10} Datum->%{DATESTAMP_OTHER:param11}";
         String logLine = "Bonsuche mit folgender Anfrage: Belegart->[EINGESCHRAENKTER_VERKAUF, VERKAUF, NACHERFASSUNG] " +
@@ -439,4 +439,50 @@
         run.set(false);
         assertThat(e.getMessage(), equalTo("grok pattern matching was interrupted after [200] ms"));
     }
+
+    public void testAtInFieldName() {
+        assertGrokedField("@metadata");
+    }
+
+    public void testNonAsciiLetterInFieldName() {
+        assertGrokedField("metädata");
+    }
+
+    public void testSquareBracketInFieldName() {
+        assertGrokedField("metadat[a]");
+        assertGrokedField("metad[a]ta");
+        assertGrokedField("[m]etadata");
+    }
+
+    public void testUnderscoreInFieldName() {
+        assertGrokedField("meta_data");
+    }
+
+    public void 
testDotInFieldName() { + assertGrokedField("meta.data"); + } + + public void testMinusInFieldName() { + assertGrokedField("meta-data"); + } + + public void testAlphanumericFieldName() { + assertGrokedField(randomAlphaOfLengthBetween(1, 5)); + assertGrokedField(randomAlphaOfLengthBetween(1, 5) + randomIntBetween(0, 100)); + assertGrokedField(randomIntBetween(0, 100) + randomAlphaOfLengthBetween(1, 5)); + assertGrokedField(String.valueOf(randomIntBetween(0, 100))); + } + + public void testUnsupportedBracketsInFieldName() { + Grok grok = new Grok(basePatterns, "%{WORD:unsuppo(r)ted}"); + Map matches = grok.captures("line"); + assertNull(matches); + } + + private void assertGrokedField(String fieldName) { + String line = "foo"; + Grok grok = new Grok(basePatterns, "%{WORD:" + fieldName + "}"); + Map matches = grok.captures(line); + assertEquals(line, matches.get(fieldName)); + } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index aca5f4a56d393..4e328ea2c984e 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -58,7 +58,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder hasParentQuery(null, query, false)); - assertThat(e.getMessage(), equalTo("[has_parent] requires 'type' field")); + assertThat(e.getMessage(), equalTo("[has_parent] requires 'parent_type' field")); e = expectThrows(IllegalArgumentException.class, () -> hasParentQuery("foo", null, false)); diff --git a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml index 2def885234c3e..b8181040665a1 100644 --- a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml +++ b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml @@ -238,3 +238,30 @@ teardown: repository: repository-file snapshot: snapshot-one +--- +"Get a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.get: + repository: repository-url + snapshot: missing + +--- +"Delete a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.delete: + repository: repository-url + snapshot: missing + +--- +"Restore a non existing snapshot": + + - do: + catch: /snapshot_restore_exception/ + snapshot.restore: + repository: repository-url + snapshot: missing + wait_for_completion: true diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml index 9ab1fafac3e97..df3e1b3216ab9 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml +++ b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/resources/rest-api-spec/test/repository_azure/10_repository.yml @@ -1,6 +1,6 @@ # Integration tests for repository-azure --- -"Snapshot/Restore with repository-azure": +setup: # Register repository - do: @@ -13,7 +13,8 @@ client: "integration_test" base_path: ${base_path} - - match: { acknowledged: true } +--- +"Snapshot/Restore with repository-azure": 
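  # The repository used by these tests is registered once in the setup section above,
  # so each test below can assume it already exists.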
# Get repository - do: @@ -172,3 +173,66 @@ repository: repository snapshot: snapshot-one master_timeout: 5m + +--- +"Register a repository with a non existing container": + + - do: + catch: /repository_verification_exception/ + snapshot.create_repository: + repository: repository + body: + type: azure + settings: + container: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test + +--- +"Register a repository with a non existing client": + + - do: + # TODO this should be a repository_exception + catch: /settings_exception/ + snapshot.create_repository: + repository: repository + body: + type: azure + settings: + bucket: repository + client: unknown + +--- +"Get a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.get: + repository: repository + snapshot: missing + +--- +"Delete a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.delete: + repository: repository + snapshot: missing + +--- +"Restore a non existing snapshot": + + - do: + catch: /snapshot_restore_exception/ + snapshot.restore: + repository: repository + snapshot: missing + wait_for_completion: true + +--- +teardown: + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index f2702b139a69d..72b62a930aeee 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -43,7 +43,7 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, R public AzureRepositoryPlugin(Settings settings) { // eagerly load client settings so that secure settings are read - this.azureStoreService = new AzureStorageServiceImpl(settings); + this.azureStoreService = new AzureStorageService(settings); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 272c550f1d723..73dd68f4b5f57 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -19,27 +19,59 @@ package org.elasticsearch.repositories.azure; +import com.microsoft.azure.storage.AccessCondition; +import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.OperationContext; +import com.microsoft.azure.storage.RetryExponentialRetry; +import com.microsoft.azure.storage.RetryPolicy; +import com.microsoft.azure.storage.StorageErrorCodeStrings; import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.BlobInputStream; +import com.microsoft.azure.storage.blob.BlobListingDetails; +import com.microsoft.azure.storage.blob.BlobProperties; import com.microsoft.azure.storage.blob.CloudBlobClient; - +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlockBlob; +import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; +import com.microsoft.azure.storage.blob.ListBlobItem; +import 
org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
+import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsException;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.file.FileAlreadyExistsException;
+import java.security.InvalidKeyException;
+import java.util.EnumSet;
 import java.util.Map;
 import java.util.function.Supplier;
 
-/**
- * Azure Storage Service interface
- * @see AzureStorageServiceImpl for Azure REST API implementation
- */
-public interface AzureStorageService {
+import static java.util.Collections.emptyMap;
+
+public class AzureStorageService extends AbstractComponent {
+
+    public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
+    public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);
+
+    // 'package' for testing
+    volatile Map<String, AzureStorageSettings> storageSettings = emptyMap();
+
+    public AzureStorageService(Settings settings) {
+        super(settings);
+        // eagerly load client settings so that secure settings are read
+        final Map<String, AzureStorageSettings> clientsSettings = AzureStorageSettings.load(settings);
+        refreshAndClearCache(clientsSettings);
+    }
 
     /**
      * Creates a {@code CloudBlobClient} on each invocation using the current client
      *
@@ -48,7 +80,46 @@ public interface AzureStorageService {
      * thread for logically coupled ops. The {@code OperationContext} is used to
      * specify the proxy, but a new context is *required* for each call.
      */
-    Tuple<CloudBlobClient, Supplier<OperationContext>> client(String clientName);
+    public Tuple<CloudBlobClient, Supplier<OperationContext>> client(String clientName) {
+        final AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName);
+        if (azureStorageSettings == null) {
+            throw new SettingsException("Unable to find client with name [" + clientName + "]");
+        }
+        try {
+            return new Tuple<>(buildClient(azureStorageSettings), () -> buildOperationContext(azureStorageSettings));
+        } catch (InvalidKeyException | URISyntaxException | IllegalArgumentException e) {
+            throw new SettingsException("Invalid azure client settings with name [" + clientName + "]", e);
+        }
+    }
+
+    protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException {
+        final CloudBlobClient client = createClient(azureStorageSettings);
+        // Set timeout option if the user sets cloud.azure.storage.timeout or
+        // cloud.azure.storage.xxx.timeout (it's negative by default)
+        final long timeout = azureStorageSettings.getTimeout().getMillis();
+        if (timeout > 0) {
+            if (timeout > Integer.MAX_VALUE) {
+                throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms.");
+            }
+            client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout);
+        }
+        // We define a default exponential retry policy
+        client.getDefaultRequestOptions()
+                .setRetryPolicyFactory(new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries()));
+        client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode());
+        return client;
+    }
+
+    protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException {
+        final String connectionString = azureStorageSettings.buildConnectionString();
+        return CloudStorageAccount.parse(connectionString).createCloudBlobClient();
+    }
+
+    protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) {
+        final OperationContext context = new OperationContext();
+        context.setProxy(azureStorageSettings.getProxy());
+        return context;
+    }
 
     /**
      * Updates settings for building clients. Any client cache is cleared. Future
@@ -57,32 +128,134 @@
      * @param clientsSettings the settings for new clients
      * @return the old settings
      */
-    Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings);
-
-    ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
-    ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);
-
-    boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException;
+    public Map<String, AzureStorageSettings> refreshAndClearCache(Map<String, AzureStorageSettings> clientsSettings) {
+        final Map<String, AzureStorageSettings> prevSettings = this.storageSettings;
+        this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap();
+        // clients are built lazily by {@link client(String)}
+        return prevSettings;
+    }
 
-    void removeContainer(String account, String container) throws URISyntaxException, StorageException;
+    public boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException {
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
+        return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get()));
+    }
 
-    void createContainer(String account, String container) throws URISyntaxException, StorageException;
+    public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException {
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        // container name must be lower case.
+        logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path));
+        SocketAccess.doPrivilegedVoidException(() -> {
+            // list the blobs using a flat blob listing mode
+            final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
+            for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null,
+                    client.v2().get())) {
+                final String blobName = blobNameFromUri(blobItem.getUri());
+                logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri()));
+                // don't call {@code #deleteBlob}, use the same client
+                final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName);
+                azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get());
+            }
+        });
+    }
 
-    void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException;
+    /**
+     * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile
+     * It should remove the container part (first part of the path) and give path/to/myfile
+     * @param uri URI to parse
+     * @return The blob name relative to the container
+     */
+    static String blobNameFromUri(URI uri) {
+        final String path = uri.getPath();
+        // We remove the container name from the path
+        // The magic number 3 comes from the fact that if the path is /container/path/to/myfile
+        // the first occurrence is the empty string before the leading "/",
+        // the second occurrence is "container",
+        // and the last part contains "path/to/myfile", which is what we want to get
+        final String[] splits = path.split("/", 3);
+        // We return the remaining end of the string
+        return splits[2];
+    }
 
-    boolean blobExists(String account, String container, String blob) throws URISyntaxException, StorageException;
+    public boolean blobExists(String account, String container, String blob) throws URISyntaxException, StorageException {
+        // Container name must be lower case.
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
+        return SocketAccess.doPrivilegedException(() -> {
+            final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);
+            return azureBlob.exists(null, null, client.v2().get());
+        });
+    }
 
-    void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException;
+    public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException {
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        // Container name must be lower case.
+        final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
+        logger.trace(() -> new ParameterizedMessage("delete blob for container [{}], blob [{}]", container, blob));
+        SocketAccess.doPrivilegedVoidException(() -> {
+            final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob);
+            logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. removing.", container, blob));
+            azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get());
+        });
+    }
 
-    InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, StorageException, IOException;
+    public InputStream getInputStream(String account, String container, String blob)
+            throws URISyntaxException, StorageException, IOException {
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob);
+        logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob));
+        final BlobInputStream is = SocketAccess.doPrivilegedException(() ->
+                blockBlobReference.openInputStream(null, null, client.v2().get()));
+        return giveSocketPermissionsToStream(is);
+    }
 
-    Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix)
-        throws URISyntaxException, StorageException;
+    public Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix)
+            throws URISyntaxException, StorageException {
+        // NOTE: this should be here: if (prefix == null) prefix = "";
+        // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and
+        // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix!
+        final MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
+        final EnumSet<BlobListingDetails> enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA);
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
+        logger.trace(() -> new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix));
+        SocketAccess.doPrivilegedVoidException(() -> {
+            for (final ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false,
+                    enumBlobListingDetails, null, client.v2().get())) {
+                final URI uri = blobItem.getUri();
+                logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri));
+                // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/
+                // this requires 1 + container.length() + 1, with each 1 corresponding to one of the /
+                final String blobPath = uri.getPath().substring(1 + container.length() + 1);
+                final BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties();
+                final String name = blobPath.substring(keyPath.length());
+                logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength()));
+                blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength()));
+            }
+        });
+        return blobsBuilder.immutableMap();
+    }
 
-    void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
-        throws URISyntaxException, StorageException, FileAlreadyExistsException;
+    public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
+            throws URISyntaxException, StorageException, FileAlreadyExistsException {
+        logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize));
+        final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
+        final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
+        final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName);
+        try {
+            SocketAccess.doPrivilegedVoidException(() ->
+                    blob.upload(inputStream, blobSize, AccessCondition.generateIfNotExistsCondition(), null, client.v2().get()));
+        } catch (final StorageException se) {
+            if (se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT &&
+                    StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) {
+                throw new FileAlreadyExistsException(blobName, null, se.getMessage());
+            }
+            throw se;
+        }
+        logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize));
+    }
 
-    static InputStream giveSocketPermissionsToStream(InputStream stream) {
+    static InputStream giveSocketPermissionsToStream(final InputStream stream) {
         return new InputStream() {
             @Override
             public int read() throws IOException {
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java
deleted file mode 100644
index c10a6d674c848..0000000000000
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.repositories.azure; - -import com.microsoft.azure.storage.AccessCondition; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RetryExponentialRetry; -import com.microsoft.azure.storage.RetryPolicy; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobInputStream; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import com.microsoft.azure.storage.blob.BlobProperties; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; -import com.microsoft.azure.storage.blob.ListBlobItem; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.blobstore.BlobMetaData; -import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.repositories.RepositoryException; - -import java.io.InputStream; -import java.net.HttpURLConnection; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; -import java.nio.file.FileAlreadyExistsException; -import java.util.EnumSet; -import java.util.Map; -import java.util.function.Supplier; - -import static java.util.Collections.emptyMap; - -public class AzureStorageServiceImpl extends AbstractComponent implements AzureStorageService { - - // 'package' for testing - volatile Map storageSettings = emptyMap(); - - public AzureStorageServiceImpl(Settings settings) { - super(settings); - // eagerly load client settings so that secure settings are read - final Map clientsSettings = AzureStorageSettings.load(settings); - refreshAndClearCache(clientsSettings); - } - - @Override - public Tuple> client(String clientName) { - final AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); - if (azureStorageSettings == null) { - throw new SettingsException("Unable to find client with name [" + clientName + "]"); - } - try { - return new Tuple<>(buildClient(azureStorageSettings), () -> buildOperationContext(azureStorageSettings)); - } catch (InvalidKeyException | URISyntaxException | IllegalArgumentException e) { - throw new SettingsException("Invalid azure client settings with name [" + clientName + "]", e); - } - } - - protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { - final CloudBlobClient client = createClient(azureStorageSettings); - // Set timeout option if the user sets cloud.azure.storage.timeout or - // cloud.azure.storage.xxx.timeout (it's negative by default) - final long timeout = azureStorageSettings.getTimeout().getMillis(); - if (timeout > 0) { - if (timeout > Integer.MAX_VALUE) { - throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms."); - } - client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout); - } - // We define a default exponential retry policy - 
client.getDefaultRequestOptions() - .setRetryPolicyFactory(new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); - client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode()); - return client; - } - - protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { - final String connectionString = azureStorageSettings.buildConnectionString(); - return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); - } - - protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { - final OperationContext context = new OperationContext(); - context.setProxy(azureStorageSettings.getProxy()); - return context; - } - - @Override - public Map refreshAndClearCache(Map clientsSettings) { - final Map prevSettings = this.storageSettings; - this.storageSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); - // clients are built lazily by {@link client(String)} - return prevSettings; - } - - @Override - public boolean doesContainerExist(String account, String container) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); - } - - @Override - public void removeContainer(String account, String container) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - logger.trace(() -> new ParameterizedMessage("removing container [{}]", container)); - SocketAccess.doPrivilegedException(() -> blobContainer.deleteIfExists(null, null, client.v2().get())); - } - - @Override - public void createContainer(String account, String container) throws URISyntaxException, StorageException { - try { - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - logger.trace(() -> new ParameterizedMessage("creating container [{}]", container)); - SocketAccess.doPrivilegedException(() -> blobContainer.createIfNotExists(null, null, client.v2().get())); - } catch (final IllegalArgumentException e) { - logger.trace(() -> new ParameterizedMessage("failed creating container [{}]", container), e); - throw new RepositoryException(container, e.getMessage(), e); - } - } - - @Override - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - // container name must be lower case. 
- logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); - SocketAccess.doPrivilegedVoidException(() -> { - // list the blobs using a flat blob listing mode - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - client.v2().get())) { - final String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); - // don't call {@code #deleteBlob}, use the same client - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - } - }); - } - - /** - * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile - * It should remove the container part (first part of the path) and gives path/to/myfile - * @param uri URI to parse - * @return The blob name relative to the container - */ - static String blobNameFromUri(URI uri) { - final String path = uri.getPath(); - // We remove the container name from the path - // The 3 magic number cames from the fact if path is /container/path/to/myfile - // First occurrence is empty "/" - // Second occurrence is "container - // Last part contains "path/to/myfile" which is what we want to get - final String[] splits = path.split("/", 3); - // We return the remaining end of the string - return splits[2]; - } - - @Override - public boolean blobExists(String account, String container, String blob) - throws URISyntaxException, StorageException { - // Container name must be lower case. - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - return SocketAccess.doPrivilegedException(() -> { - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - return azureBlob.exists(null, null, client.v2().get()); - }); - } - - @Override - public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - // Container name must be lower case. - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - logger.trace(() -> new ParameterizedMessage("delete blob for container [{}], blob [{}]", container, blob)); - SocketAccess.doPrivilegedVoidException(() -> { - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. 
removing.", container, blob)); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - }); - } - - @Override - public InputStream getInputStream(String account, String container, String blob) throws URISyntaxException, - StorageException { - final Tuple> client = client(account); - final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob); - logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob)); - final BlobInputStream is = SocketAccess.doPrivilegedException(() -> - blockBlobReference.openInputStream(null, null, client.v2().get())); - return AzureStorageService.giveSocketPermissionsToStream(is); - } - - @Override - public Map listBlobsByPrefix(String account, String container, String keyPath, String prefix) - throws URISyntaxException, StorageException { - // NOTE: this should be here: if (prefix == null) prefix = ""; - // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and - // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix! - final MapBuilder blobsBuilder = MapBuilder.newMapBuilder(); - final EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - logger.trace(() -> new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix)); - SocketAccess.doPrivilegedVoidException(() -> { - for (final ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false, - enumBlobListingDetails, null, client.v2().get())) { - final URI uri = blobItem.getUri(); - logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri)); - // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ - // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / - final String blobPath = uri.getPath().substring(1 + container.length() + 1); - final BlobProperties properties = ((CloudBlockBlob) blobItem).getProperties(); - final String name = blobPath.substring(keyPath.length()); - logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength())); - blobsBuilder.put(name, new PlainBlobMetaData(name, properties.getLength())); - } - }); - return blobsBuilder.immutableMap(); - } - - @Override - public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize) - throws URISyntaxException, StorageException, FileAlreadyExistsException { - logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); - final Tuple> client = client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); - try { - SocketAccess.doPrivilegedVoidException(() -> - blob.upload(inputStream, blobSize, AccessCondition.generateIfNotExistsCondition(), null, client.v2().get())); - } catch (final StorageException se) { - if (se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && - StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { - throw new FileAlreadyExistsException(blobName, null, se.getMessage()); - } - throw se; - } - logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - 
done", blobName, blobSize)); - } - -} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java deleted file mode 100644 index 10163bb2f31df..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureSnapshotRestoreTests.java +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.azure; - -import com.carrotsearch.randomizedtesting.RandomizedTest; -import com.microsoft.azure.storage.StorageException; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.RepositoryMissingException; -import org.elasticsearch.repositories.RepositoryVerificationException; -import org.elasticsearch.repositories.azure.AzureRepository.Repository; -import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.elasticsearch.snapshots.SnapshotMissingException; -import org.elasticsearch.snapshots.SnapshotRestoreException; -import org.elasticsearch.snapshots.SnapshotState; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.ThirdParty; -import org.elasticsearch.test.store.MockFSDirectoryService; -import org.elasticsearch.test.store.MockFSIndexStore; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.net.URISyntaxException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Locale; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.repositories.azure.AzureTestUtils.generateMockSecureSettings; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - -/** - * Those integration tests need an Azure access and must be run with - * {@code -Dtests.thirdparty=true -Dtests.azure.account=AzureStorageAccount 
-Dtests.azure.key=AzureStorageKey} - * options - */ -@ClusterScope( - scope = ESIntegTestCase.Scope.SUITE, - supportsDedicatedMasters = false, numDataNodes = 1, - transportClientRatio = 0.0) -@ThirdParty -public class AzureSnapshotRestoreTests extends ESBlobStoreRepositoryIntegTestCase { - - private static Settings.Builder generateMockSettings() { - return Settings.builder().setSecureSettings(generateMockSecureSettings()); - } - - @SuppressWarnings("resource") - private static AzureStorageService getAzureStorageService() { - return new AzureRepositoryPlugin(generateMockSettings().build()).azureStoreService; - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return generateMockSettings() - .put(super.nodeSettings(nodeOrdinal)) - .build(); - } - - private static String getContainerName() { - /* Have a different name per test so that there is no possible race condition. As the long can be negative, - * there mustn't be a hyphen between the 2 concatenated numbers - * (can't have 2 consecutive hyphens on Azure containers) - */ - final String testName = "snapshot-itest-" - .concat(RandomizedTest.getContext().getRunnerSeedAsString().toLowerCase(Locale.ROOT)); - return testName.contains(" ") ? Strings.split(testName, " ")[0] : testName; - } - - @BeforeClass - public static void createTestContainers() throws Exception { - createTestContainer(getContainerName()); - // This is needed for testMultipleRepositories() test case - createTestContainer(getContainerName() + "-1"); - createTestContainer(getContainerName() + "-2"); - } - - @AfterClass - public static void removeContainer() throws Exception { - removeTestContainer(getContainerName()); - // This is needed for testMultipleRepositories() test case - removeTestContainer(getContainerName() + "-1"); - removeTestContainer(getContainerName() + "-2"); - } - - /** - * Create a test container in Azure - * @param containerName container name to use - */ - private static void createTestContainer(String containerName) throws Exception { - // It could happen that we run this test really close to a previous one - // so we might need some time to be able to create the container - assertBusy(() -> { - getAzureStorageService().createContainer("default", containerName); - }, 30, TimeUnit.SECONDS); - } - - /** - * Remove a test container in Azure - * @param containerName container name to use - */ - private static void removeTestContainer(String containerName) throws URISyntaxException, StorageException { - getAzureStorageService().removeContainer("default", containerName); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(AzureRepositoryPlugin.class, MockFSIndexStore.TestPlugin.class); - } - - private String getRepositoryPath() { - final String testName = "it-" + getTestName(); - return testName.contains(" ") ? 
Strings.split(testName, " ")[0] : testName; - } - - @Override - public Settings indexSettings() { - // During restore we frequently restore index to exactly the same state it was before, that might cause the same - // checksum file to be written twice during restore operation - return Settings.builder().put(super.indexSettings()) - .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE_SETTING.getKey(), false) - .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE_SETTING.getKey(), false) - .build(); - } - - @After - public final void wipeAzureRepositories() { - try { - client().admin().cluster().prepareDeleteRepository("*").get(); - } catch (final RepositoryMissingException ignored) { - } - } - - public void testMultipleRepositories() { - final Client client = client(); - logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - final PutRepositoryResponse putRepositoryResponse1 = client.admin().cluster().preparePutRepository("test-repo1") - .setType("azure").setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-1")) - .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) - .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) - ).get(); - assertThat(putRepositoryResponse1.isAcknowledged(), equalTo(true)); - final PutRepositoryResponse putRepositoryResponse2 = client.admin().cluster().preparePutRepository("test-repo2") - .setType("azure").setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName().concat("-2")) - .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) - .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) - ).get(); - assertThat(putRepositoryResponse2.isAcknowledged(), equalTo(true)); - - createIndex("test-idx-1", "test-idx-2"); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); - index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); - } - refresh(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - - logger.info("--> snapshot 1"); - final CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot("test-repo1", "test-snap") - .setWaitForCompletion(true).setIndices("test-idx-1").get(); - assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards())); - - logger.info("--> snapshot 2"); - final CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot("test-repo2", "test-snap") - .setWaitForCompletion(true).setIndices("test-idx-2").get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards())); - - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo1").setSnapshots("test-snap").get().getSnapshots().get(0).state(), - equalTo(SnapshotState.SUCCESS)); - 
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo2").setSnapshots("test-snap").get().getSnapshots().get(0).state(), - equalTo(SnapshotState.SUCCESS)); - - // Test restore after index deletion - logger.info("--> delete indices"); - cluster().wipeIndices("test-idx-1", "test-idx-2"); - logger.info("--> restore one index after deletion from snapshot 1"); - final RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin().cluster().prepareRestoreSnapshot("test-repo1", "test-snap") - .setWaitForCompletion(true).setIndices("test-idx-1").get(); - assertThat(restoreSnapshotResponse1.getRestoreInfo().totalShards(), greaterThan(0)); - ensureGreen(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); - assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); - assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); - - logger.info("--> restore other index after deletion from snapshot 2"); - final RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin().cluster().prepareRestoreSnapshot("test-repo2", "test-snap") - .setWaitForCompletion(true).setIndices("test-idx-2").get(); - assertThat(restoreSnapshotResponse2.getRestoreInfo().totalShards(), greaterThan(0)); - ensureGreen(); - assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - clusterState = client.admin().cluster().prepareState().get().getState(); - assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); - assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(true)); - } - - /** - * For issue #26: https://github.com/elastic/elasticsearch-cloud-azure/issues/26 - */ - public void testListBlobs_26() throws StorageException, URISyntaxException { - final String repositoryName="test-repo-26"; - createIndex("test-idx-1", "test-idx-2", "test-idx-3"); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); - index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); - index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); - } - refresh(); - - final ClusterAdminClient client = client().admin().cluster(); - logger.info("--> creating azure repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") - .setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - // Get all snapshots - should be empty - assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(0)); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.prepareCreateSnapshot(repositoryName, "test-snap-26") - .setWaitForCompletion(true).setIndices("test-idx-*").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - - // Get all snapshots - should have one - assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(1)); - - // Clean the snapshot - client.prepareDeleteSnapshot(repositoryName, "test-snap-26").get(); - client.prepareDeleteRepository(repositoryName).get(); - - logger.info("--> creating azure repository path [{}]", 
getRepositoryPath()); - putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") - .setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - // Get all snapshots - should be empty - assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(0)); - - logger.info("--> snapshot"); - createSnapshotResponse = client.prepareCreateSnapshot(repositoryName, "test-snap-26").setWaitForCompletion(true) - .setIndices("test-idx-*").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - - // Get all snapshots - should have one - assertThat(client.prepareGetSnapshots(repositoryName).get().getSnapshots().size(), equalTo(1)); - } - - /** - * For issue #28: https://github.com/elastic/elasticsearch-cloud-azure/issues/28 - */ - public void testGetDeleteNonExistingSnapshot_28() throws StorageException, URISyntaxException { - final String repositoryName="test-repo-28"; - final ClusterAdminClient client = client().admin().cluster(); - logger.info("--> creating azure repository without any path"); - final PutRepositoryResponse putRepositoryResponse = client.preparePutRepository(repositoryName).setType("azure") - .setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - try { - client.prepareGetSnapshots(repositoryName).addSnapshots("nonexistingsnapshotname").get(); - fail("Shouldn't be here"); - } catch (final SnapshotMissingException ex) { - // Expected - } - - try { - client.prepareDeleteSnapshot(repositoryName, "nonexistingsnapshotname").get(); - fail("Shouldn't be here"); - } catch (final SnapshotMissingException ex) { - // Expected - } - } - - /** - * Test case for issue #23: https://github.com/elastic/elasticsearch-cloud-azure/issues/23 - */ - public void testNonExistingRepo_23() { - final String repositoryName = "test-repo-test23"; - final Client client = client(); - logger.info("--> creating azure repository with path [{}]", getRepositoryPath()); - final PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName) - .setType("azure").setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) - .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000), ByteSizeUnit.BYTES) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - logger.info("--> restore non existing snapshot"); - try { - client.admin().cluster().prepareRestoreSnapshot(repositoryName, "no-existing-snapshot").setWaitForCompletion(true).get(); - fail("Shouldn't be here"); - } catch (final SnapshotRestoreException ex) { - // Expected - } - } - - /** - * When a user removes a container you cannot immediately create it again. - */
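Aside: Azure removes containers asynchronously, which is why the test below expects repository creation to fail right after a delete. A minimal sketch of the wait-and-retry pattern that createTestContainer (above) gets from assertBusy; the helper name and timings here are illustrative, while AzureStorageService.createContainer is as declared earlier in this diff.

    // Sketch: retry container creation until a pending asynchronous delete completes.
    static void createContainerWithRetry(AzureStorageService service, String container) throws Exception {
        final long deadline = System.currentTimeMillis() + 30_000; // mirrors assertBusy's 30s budget
        while (true) {
            try {
                service.createContainer("default", container);
                return; // created successfully
            } catch (Exception e) {
                if (System.currentTimeMillis() >= deadline) {
                    throw e; // give up once the budget is spent
                }
                Thread.sleep(1_000); // the container may still be mid-delete on the service side
            }
        }
    }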
- public void testRemoveAndCreateContainer() throws Exception { - final String container = getContainerName().concat("-testremove"); - - createTestContainer(container); - removeTestContainer(container); - - final ClusterAdminClient client = client().admin().cluster(); - logger.info("--> creating azure repository while container is being removed"); - try { - client.preparePutRepository("test-repo").setType("azure") - .setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), container) - ).get(); - fail("we should get a RepositoryVerificationException"); - } catch (final RepositoryVerificationException e) { - // Fine, we expect that - } - } - - /** - * Test that you can snapshot on the primary repository and list the available snapshots - * from the secondary repository. - * - * Note that this test requires an Azure storage account which must be a Read-access geo-redundant - * storage (RA-GRS) account type. - * @throws Exception If anything goes wrong - */ - public void testGeoRedundantStorage() throws Exception { - final Client client = client(); - logger.info("--> creating azure primary repository"); - final PutRepositoryResponse putRepositoryResponsePrimary = client.admin().cluster().preparePutRepository("primary") - .setType("azure").setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - ).get(); - assertThat(putRepositoryResponsePrimary.isAcknowledged(), equalTo(true)); - - logger.info("--> start get snapshots on primary"); - long startWait = System.currentTimeMillis(); - client.admin().cluster().prepareGetSnapshots("primary").get(); - long endWait = System.currentTimeMillis(); - // definitely should be done in 30s, and if it's not working as expected, it takes over 1m - assertThat(endWait - startWait, lessThanOrEqualTo(30000L)); - - logger.info("--> creating azure secondary repository"); - final PutRepositoryResponse putRepositoryResponseSecondary = client.admin().cluster().preparePutRepository("secondary") - .setType("azure").setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - .put(Repository.LOCATION_MODE_SETTING.getKey(), "secondary_only") - ).get(); - assertThat(putRepositoryResponseSecondary.isAcknowledged(), equalTo(true)); - - logger.info("--> start get snapshots on secondary"); - startWait = System.currentTimeMillis(); - client.admin().cluster().prepareGetSnapshots("secondary").get(); - endWait = System.currentTimeMillis(); - logger.info("--> end of get snapshots on secondary. 
Took {} ms", endWait - startWait); - assertThat(endWait - startWait, lessThanOrEqualTo(30000L)); - } - - @Override - protected void createTestRepository(String name) { - assertAcked(client().admin().cluster().preparePutRepository(name) - .setType(AzureRepository.TYPE) - .setSettings(Settings.builder() - .put(Repository.CONTAINER_SETTING.getKey(), getContainerName()) - .put(Repository.BASE_PATH_SETTING.getKey(), getRepositoryPath()) - .put(Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - } -} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 46af05c3845aa..264cb90378529 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.Streams; @@ -49,7 +48,7 @@ /** * In memory storage for unit tests */ -public class AzureStorageServiceMock extends AbstractComponent implements AzureStorageService { +public class AzureStorageServiceMock extends AzureStorageService { protected final Map blobs = new ConcurrentHashMap<>(); @@ -62,14 +61,6 @@ public boolean doesContainerExist(String account, String container) { return true; } - @Override - public void removeContainer(String account, String container) { - } - - @Override - public void createContainer(String account, String container) { - } - @Override public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { final Map blobs = listBlobsByPrefix(account, container, path, null); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 3308db682fece..3b3793f22ba04 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -37,7 +37,7 @@ import java.nio.charset.StandardCharsets; import java.util.Map; -import static org.elasticsearch.repositories.azure.AzureStorageServiceImpl.blobNameFromUri; +import static org.elasticsearch.repositories.azure.AzureStorageService.blobNameFromUri; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -64,7 +64,7 @@ public void testCreateClientWithEndpointSuffix() throws IOException { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { - final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final AzureStorageService azureStorageService = plugin.azureStoreService; final 
CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); final CloudBlobClient client2 = azureStorageService.client("azure2").v1(); @@ -86,7 +86,7 @@ public void testReinitClientSettings() throws IOException { secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23")); final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { - final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); final CloudBlobClient client12 = azureStorageService.client("azure2").v1(); @@ -118,7 +118,7 @@ public void testReinitClientEmptySettings() throws IOException { secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11")); final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { - final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); // reinit with empty settings @@ -142,7 +142,7 @@ public void testReinitClientWrongSettings() throws IOException { // missing key final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { - final AzureStorageServiceImpl azureStorageService = (AzureStorageServiceImpl) plugin.azureStoreService; + final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); plugin.reload(settings2); @@ -154,7 +154,7 @@ public void testReinitClientWrongSettings() throws IOException { } public void testGetSelectedClientNonExisting() { - final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); + final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); assertThat(e.getMessage(), is("Unable to find client with name [azure4]")); } @@ -164,7 +164,7 @@ public void testGetSelectedClientDefaultTimeout() { .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings); + final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); @@ -172,13 +172,13 @@ public void testGetSelectedClientDefaultTimeout() { } 
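Aside: the tests around here all exercise the same wiring — per-client options come from azure.client.<name>.* settings and surface on the CloudBlobClient's default request options. A condensed sketch of that pattern; buildSecureSettings() supplies account/key pairs as elsewhere in this class, and the 30_000 ms expectation assumes a 30s timeout is exposed in milliseconds, as the assertions in this file suggest.

    // Condensed sketch of the settings-to-client wiring exercised by these tests.
    public void testClientOptionsSketch() {
        final Settings settings = Settings.builder()
            .setSecureSettings(buildSecureSettings())
            .put("azure.client.azure3.timeout", "30s")   // per-client request timeout
            .put("azure.client.azure3.max_retries", 7)   // feeds RetryExponentialRetry
            .build();
        final AzureStorageService azureStorageService = new AzureStorageService(settings);
        final CloudBlobClient client3 = azureStorageService.client("azure3").v1();
        assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000));
        assertThat(client3.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class));
    }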
public void testGetSelectedClientNoTimeout() { - final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); + final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(buildSettings()); + final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -190,7 +190,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { .put("azure.client.azure1.max_retries", 7) .build(); - final AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(timeoutSettings); + final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -200,7 +200,7 @@ public void testNoProxy() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final AzureStorageService mock = new AzureStorageService(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); @@ -213,7 +213,7 @@ public void testProxyHttp() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final AzureStorageService mock = new AzureStorageService(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); @@ -233,7 +233,7 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final AzureStorageService mock = new AzureStorageService(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -252,7 +252,7 @@ public void testProxySocks() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - final AzureStorageServiceImpl mock = new AzureStorageServiceImpl(settings); + final AzureStorageService mock = new AzureStorageService(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); @@ -267,7 +267,7 @@ public void testProxyNoHost() { 
.put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -278,7 +278,7 @@ public void testProxyNoPort() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -289,7 +289,7 @@ public void testProxyNoType() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } @@ -301,7 +301,7 @@ public void testProxyWrongHost() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageServiceImpl(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureTestUtils.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureTestUtils.java deleted file mode 100644 index 52ff8a7faa49a..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureTestUtils.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.azure; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.SecureSettings; - -public class AzureTestUtils { - /** - * Mock secure settings from sysprops when running integration tests with ThirdParty annotation. 
- * Start the tests with {@code -Dtests.azure.account=AzureStorageAccount} and {@code -Dtests.azure.key=AzureStorageKey} - * @return Mock Settings from sysprops - */ - public static SecureSettings generateMockSecureSettings() { - MockSecureSettings secureSettings = new MockSecureSettings(); - - if (Strings.isEmpty(System.getProperty("tests.azure.account")) || - Strings.isEmpty(System.getProperty("tests.azure.key"))) { - throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and " + - "-Dtests.azure.account=azure-account -Dtests.azure.key=azure-key"); - } - - secureSettings.setString("azure.client.default.account", System.getProperty("tests.azure.account")); - secureSettings.setString("azure.client.default.key", System.getProperty("tests.azure.key")); - - return secureSettings; - } -} diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml index 65d02b5fadefc..c7b8949a11335 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml @@ -1,6 +1,6 @@ # Integration tests for repository-gcs --- -"Snapshot/Restore with repository-gcs": +setup: # Register repository - do: @@ -13,7 +13,8 @@ client: "integration_test" base_path: ${base_path} - - match: { acknowledged: true } +--- +"Snapshot/Restore with repository-gcs": # Get repository - do: @@ -171,6 +172,63 @@ repository: repository snapshot: snapshot-one +--- +"Register a repository with a non existing bucket": + + - do: + catch: /repository_exception/ + snapshot.create_repository: + repository: repository + body: + type: gcs + settings: + bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test + +--- +"Register a repository with a non existing client": + + - do: + catch: /repository_exception/ + snapshot.create_repository: + repository: repository + body: + type: gcs + settings: + bucket: repository + client: unknown + +--- +"Get a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.get: + repository: repository + snapshot: missing + +--- +"Delete a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.delete: + repository: repository + snapshot: missing + +--- +"Restore a non existing snapshot": + + - do: + catch: /snapshot_restore_exception/ + snapshot.restore: + repository: repository + snapshot: missing + wait_for_completion: true + +--- +teardown: + # Remove our repository - do: snapshot.delete_repository: diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index c20b99790088e..2bfb3afc7d36c 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -54,6 +54,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; class GoogleCloudStorageBlobStore extends 
AbstractComponent implements BlobStore { @@ -163,16 +164,19 @@ boolean blobExists(String blobName) throws IOException { */ InputStream readBlob(String blobName) throws IOException { final BlobId blobId = BlobId.of(bucketName, blobName); - final Blob blob = SocketAccess.doPrivilegedIOException(() -> client().get(blobId)); - if (blob == null) { - throw new NoSuchFileException("Blob [" + blobName + "] does not exit"); - } - final ReadChannel readChannel = SocketAccess.doPrivilegedIOException(blob::reader); + final ReadChannel readChannel = SocketAccess.doPrivilegedIOException(() -> client().reader(blobId)); return Channels.newInputStream(new ReadableByteChannel() { @SuppressForbidden(reason = "Channel is based of a socket not a file") @Override public int read(ByteBuffer dst) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> readChannel.read(dst)); + try { + return SocketAccess.doPrivilegedIOException(() -> readChannel.read(dst)); + } catch (StorageException e) { + if (e.getCode() == HTTP_NOT_FOUND) { + throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); + } + throw e; + } } @Override diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index 605d1798ee826..cf7395ea1f1f7 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -167,7 +167,27 @@ public Iterable getValues() { public ReadChannel reader(BlobId blob, BlobSourceOption... options) { if (bucketName.equals(blob.getBucket())) { final byte[] bytes = blobs.get(blob.getName()); - final ReadableByteChannel readableByteChannel = Channels.newChannel(new ByteArrayInputStream(bytes)); + + final ReadableByteChannel readableByteChannel; + if (bytes != null) { + readableByteChannel = Channels.newChannel(new ByteArrayInputStream(bytes)); + } else { + readableByteChannel = new ReadableByteChannel() { + @Override + public int read(ByteBuffer dst) throws IOException { + throw new StorageException(404, "Object not found"); + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() throws IOException { + } + }; + } return new ReadChannel() { @Override public void close() { diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java index 20e21675acb79..d1034aff48248 100644 --- a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java @@ -174,10 +174,8 @@ private static PathTrie defaultHandlers(final Map clientReference.client().deleteObject(blobStore.bucket(), buildKey(blobName))); } catch (final AmazonClientException e) { throw new IOException("Exception when deleting blob [" + blobName + "]", e); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractAwsTestCase.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractAwsTestCase.java deleted file mode 100644 index f004145e7ff58..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractAwsTestCase.java +++ /dev/null @@ 
-1,74 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ThirdParty; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; - -/** - * Base class for AWS tests that require credentials. - *

- * You must specify {@code -Dtests.thirdparty=true -Dtests.config=/path/to/config} - * in order to run these tests. - */ -@ThirdParty -public abstract class AbstractAwsTestCase extends ESIntegTestCase { - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Settings.Builder settings = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("cloud.aws.test.random", randomInt()) - .put("cloud.aws.test.write_failures", 0.1) - .put("cloud.aws.test.read_failures", 0.1); - - // if explicit, just load it and don't load from env - try { - if (Strings.hasText(System.getProperty("tests.config"))) { - try { - settings.loadFromPath(PathUtils.get(System.getProperty("tests.config"))); - } catch (IOException e) { - throw new IllegalArgumentException("could not load aws tests config", e); - } - } else { - throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml"); - } - } catch (SettingsException exception) { - throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception); - } - return settings.build(); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(TestAwsS3Service.TestPlugin.class); - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java deleted file mode 100644 index 439927acb4e70..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.repositories.s3; - -import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3ObjectSummary; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.repositories.RepositoryMissingException; -import org.elasticsearch.repositories.RepositoryVerificationException; -import org.elasticsearch.snapshots.SnapshotMissingException; -import org.elasticsearch.snapshots.SnapshotState; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.junit.After; -import org.junit.Before; - -import java.util.ArrayList; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.notNullValue; - -@ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, transportClientRatio = 0.0) -public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase { - - private String basePath; - - @Before - public final void wipeBefore() { - wipeRepositories(); - basePath = "repo-" + randomInt(); - cleanRepositoryFiles(basePath); - } - - @After - public final void wipeAfter() { - wipeRepositories(); - cleanRepositoryFiles(basePath); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211") - public void testEncryption() { - Client client = client(); - logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); - - Settings repositorySettings = Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)) - .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) - .build(); - - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(repositorySettings).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - createIndex("test-idx-1", "test-idx-2", "test-idx-3"); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); - index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i); - index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i); - } - refresh(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", 
"test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - - assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - - Settings settings = internalCluster().getInstance(Settings.class); - Settings bucket = settings.getByPrefix("repositories.s3."); - try (AmazonS3Reference s3Client = internalCluster().getInstance(S3Service.class).client("default")) { - String bucketName = bucket.get("bucket"); - logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); - List summaries = s3Client.client().listObjects(bucketName, basePath).getObjectSummaries(); - for (S3ObjectSummary summary : summaries) { - assertThat(s3Client.client().getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256")); - } - } - - logger.info("--> delete some data"); - for (int i = 0; i < 50; i++) { - client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get(); - } - for (int i = 50; i < 100; i++) { - client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get(); - } - for (int i = 0; i < 100; i += 2) { - client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get(); - } - refresh(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(50L)); - assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(50L)); - assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L)); - - logger.info("--> close indices"); - client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); - - logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - - ensureGreen(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L)); - - // Test restore after index deletion - logger.info("--> delete indices"); - cluster().wipeIndices("test-idx-1", "test-idx-2"); - logger.info("--> restore one index after deletion"); - restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - ensureGreen(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - ClusterState clusterState = client.admin().cluster().prepareState().get().getState(); - assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true)); - assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false)); - } - - /** - * This test verifies that the test configuration is set up in a manner that - * does not make the 
test {@link #testRepositoryWithCustomCredentials()} pointless. - */ - public void testRepositoryWithCustomCredentialsIsNotAccessibleByDefaultCredentials() { - Client client = client(); - Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket."); - logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); - try { - client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) - ).get(); - fail("repository verification should have raise an exception!"); - } catch (RepositoryVerificationException e) { - } - } - - public void testRepositoryWithBasePath() { - Client client = client(); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - assertRepositoryIsOperational(client, "test-repo"); - } - - public void testRepositoryWithCustomCredentials() { - Client client = client(); - Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket."); - logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - assertRepositoryIsOperational(client, "test-repo"); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211") - public void testRepositoryWithCustomEndpointProtocol() { - Client client = client(); - Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.external-bucket."); - logger.info("--> creating s3 repostoriy with endpoint [{}], bucket[{}] and path [{}]", bucketSettings.get("endpoint"), bucketSettings.get("bucket"), basePath); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - assertRepositoryIsOperational(client, "test-repo"); - } - - /** - * This test verifies that the test configuration is set up in a manner that - * does not make the test {@link #testRepositoryInRemoteRegion()} pointless. 
- */ - public void testRepositoryInRemoteRegionIsRemote() { - Client client = client(); - Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.remote-bucket."); - logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); - try { - client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) - // Below setting intentionally omitted to assert bucket is not available in default region. - // .put("region", privateBucketSettings.get("region")) - ).get(); - fail("repository verification should have raised an exception!"); - } catch (RepositoryVerificationException e) { - } - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211") - public void testRepositoryInRemoteRegion() { - Client client = client(); - Settings settings = internalCluster().getInstance(Settings.class); - Settings bucketSettings = settings.getByPrefix("repositories.s3.remote-bucket."); - logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - assertRepositoryIsOperational(client, "test-repo"); - } - - /** - * Test case for issue #86: https://github.com/elastic/elasticsearch-cloud-aws/issues/86 - */ - public void testNonExistingRepo86() { - Client client = client(); - logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); - PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - logger.info("--> restore non-existing snapshot"); - try { - client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).execute().actionGet(); - fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { - // Expected - } - } - - /** - * For issue #86: https://github.com/elastic/elasticsearch-cloud-aws/issues/86 - */ - public void testGetDeleteNonExistingSnapshot86() { - ClusterAdminClient client = client().admin().cluster(); - logger.info("--> creating s3 repository without any path"); - PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath) - ).get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - - try { - client.prepareGetSnapshots("test-repo").addSnapshots("no-existing-snapshot").get(); - fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { - // Expected - } - - try { - client.prepareDeleteSnapshot("test-repo", "no-existing-snapshot").get(); - fail("Shouldn't be here"); - } catch (SnapshotMissingException ex) { - // Expected - } - } - - private
void assertRepositoryIsOperational(Client client, String repository) { - createIndex("test-idx-1"); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repository, "test-snap").setWaitForCompletion(true).setIndices("test-idx-*").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - - assertThat(client.admin().cluster().prepareGetSnapshots(repository).setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - - logger.info("--> delete some data"); - for (int i = 0; i < 50; i++) { - client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get(); - } - refresh(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(50L)); - - logger.info("--> close indices"); - client.admin().indices().prepareClose("test-idx-1").get(); - - logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repository, "test-snap").setWaitForCompletion(true).execute().actionGet(); - assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - - ensureGreen(); - assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L)); - } - - - /** - * Deletes repositories, supports wildcard notation. - */ - public static void wipeRepositories(String... repositories) { - // if nothing is provided, delete all - if (repositories.length == 0) { - repositories = new String[]{"*"}; - } - for (String repository : repositories) { - try { - client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); - } catch (RepositoryMissingException ex) { - // ignore - } - } - } - - /** - * Deletes content of the repository files in the bucket - */ - public void cleanRepositoryFiles(String basePath) { - Settings settings = internalCluster().getInstance(Settings.class); - Settings[] buckets = { - settings.getByPrefix("repositories.s3."), - settings.getByPrefix("repositories.s3.private-bucket."), - settings.getByPrefix("repositories.s3.remote-bucket."), - settings.getByPrefix("repositories.s3.external-bucket.") - }; - for (Settings bucket : buckets) { - String bucketName = bucket.get("bucket"); - - // We check that settings has been set in elasticsearch.yml integration test file - // as described in README - assertThat("Your settings in elasticsearch.yml are incorrect. 
Check README file.", bucketName, notNullValue()); - try (AmazonS3Reference s3Client = internalCluster().getInstance(S3Service.class).client("default")) { - ObjectListing prevListing = null; - //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html - //we can do at most 1K objects per delete - //We don't know the bucket name until first object listing - DeleteObjectsRequest multiObjectDeleteRequest = null; - ArrayList keys = new ArrayList<>(); - while (true) { - ObjectListing list; - if (prevListing != null) { - list = s3Client.client().listNextBatchOfObjects(prevListing); - } else { - list = s3Client.client().listObjects(bucketName, basePath); - multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); - } - for (S3ObjectSummary summary : list.getObjectSummaries()) { - keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey())); - //Every 500 objects batch the delete request - if (keys.size() > 500) { - multiObjectDeleteRequest.setKeys(keys); - s3Client.client().deleteObjects(multiObjectDeleteRequest); - multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); - keys.clear(); - } - } - if (list.isTruncated()) { - prevListing = list; - } else { - break; - } - } - if (!keys.isEmpty()) { - multiObjectDeleteRequest.setKeys(keys); - s3Client.client().deleteObjects(multiObjectDeleteRequest); - } - } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex); - } - } - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java index d610e6d74a06d..b5fb01869ae8c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java @@ -149,15 +149,9 @@ public ObjectListing listObjects(final ListObjectsRequest request) throws Amazon @Override public void deleteObject(final DeleteObjectRequest request) throws AmazonClientException { assertThat(request.getBucketName(), equalTo(bucket)); - - final String blobName = request.getKey(); - if (blobs.remove(blobName) == null) { - AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist."); - exception.setStatusCode(404); - throw exception; - } + blobs.remove(request.getKey()); } - + @Override public void shutdown() { // TODO check close diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ProxiedSnapshotRestoreOverHttpsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ProxiedSnapshotRestoreOverHttpsTests.java deleted file mode 100644 index 667a75656b30e..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ProxiedSnapshotRestoreOverHttpsTests.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import org.elasticsearch.common.settings.Settings; -import org.junit.Before; - -/** - * This will only run if you define in your `elasticsearch.yml` file a s3 specific proxy - * cloud.aws.s3.proxy_host: mys3proxy.company.com - * cloud.aws.s3.proxy_port: 8080 - */ -public class S3ProxiedSnapshotRestoreOverHttpsTests extends AbstractS3SnapshotRestoreTest { - - private boolean proxySet = false; - - @Override - public Settings nodeSettings(int nodeOrdinal) { - Settings settings = super.nodeSettings(nodeOrdinal); - String proxyHost = settings.get("cloud.aws.s3.proxy_host"); - proxySet = proxyHost != null; - return settings; - } - - @Before - public void checkProxySettings() { - assumeTrue("we are expecting proxy settings in elasticsearch.yml file", proxySet); - } - -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java deleted file mode 100644 index 1c1a3457d7a04..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import org.elasticsearch.common.settings.Settings; - -public class S3SnapshotRestoreOverHttpTests extends AbstractS3SnapshotRestoreTest { - @Override - public Settings nodeSettings(int nodeOrdinal) { - Settings.Builder settings = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("cloud.aws.s3.protocol", "http"); - return settings.build(); - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java deleted file mode 100644 index b888d015836cd..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import org.elasticsearch.common.settings.Settings; - -public class S3SnapshotRestoreOverHttpsTests extends AbstractS3SnapshotRestoreTest { - @Override - public Settings nodeSettings(int nodeOrdinal) { - Settings.Builder settings = Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("cloud.aws.s3.protocol", "https"); - return settings.build(); - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java deleted file mode 100644 index 0c762659a5fe0..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAmazonS3.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.repositories.s3; - -import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonServiceException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.ObjectMetadata; -import com.amazonaws.services.s3.model.PutObjectResult; -import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.UploadPartRequest; -import com.amazonaws.services.s3.model.UploadPartResult; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; - -import java.io.IOException; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble; - -public class TestAmazonS3 extends AmazonS3Wrapper { - - protected final Logger logger = Loggers.getLogger(getClass()); - - private double writeFailureRate = 0.0; - private double readFailureRate = 0.0; - - private final String randomPrefix; - - ConcurrentMap accessCounts = new ConcurrentHashMap<>(); - - private long incrementAndGet(String path) { - AtomicLong value = accessCounts.get(path); - if (value == null) { - value = accessCounts.putIfAbsent(path, new AtomicLong(1)); - } - if (value != null) { - return value.incrementAndGet(); - } - return 1; - } - - public TestAmazonS3(AmazonS3 delegate, Settings settings) { - super(delegate); - randomPrefix = settings.get("cloud.aws.test.random"); - writeFailureRate = settings.getAsDouble("cloud.aws.test.write_failures", 0.0); - readFailureRate = settings.getAsDouble("cloud.aws.test.read_failures", 0.0); - } - - @Override - public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException { - if (shouldFail(bucketName, key, writeFailureRate)) { - final long length = metadata.getContentLength(); - final long partToRead = (long) (length * randomDouble()); - final byte[] buffer = new byte[1024]; - for (long cur = 0; cur < partToRead; cur += buffer.length) { - try { - input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? 
buffer.length : partToRead - cur)); - } catch (final IOException ex) { - throw new ElasticsearchException("cannot read input stream", ex); - } - } - logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key); - final AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception"); - ex.setStatusCode(400); - ex.setErrorCode("RequestTimeout"); - throw ex; - } else { - return super.putObject(bucketName, key, input, metadata); - } - } - - @Override - public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException { - if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) { - final long length = request.getPartSize(); - final long partToRead = (long) (length * randomDouble()); - final byte[] buffer = new byte[1024]; - for (long cur = 0; cur < partToRead; cur += buffer.length) { - try (InputStream input = request.getInputStream()){ - input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? buffer.length : partToRead - cur)); - } catch (final IOException ex) { - throw new ElasticsearchException("cannot read input stream", ex); - } - } - logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey()); - final AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception"); - ex.setStatusCode(400); - ex.setErrorCode("RequestTimeout"); - throw ex; - } else { - return super.uploadPart(request); - } - } - - @Override - public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException { - if (shouldFail(bucketName, key, readFailureRate)) { - logger.info("--> random read failure on getObject method: throwing an exception for [bucket={}, key={}]", bucketName, key); - final AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception"); - ex.setStatusCode(404); - throw ex; - } else { - return super.getObject(bucketName, key); - } - } - - private boolean shouldFail(String bucketName, String key, double probability) { - if (probability > 0.0) { - String path = randomPrefix + "-" + bucketName + "+" + key; - path += "/" + incrementAndGet(path); - return Math.abs(hashCode(path)) < (Integer.MAX_VALUE * probability); - } else { - return false; - } - } - - private int hashCode(String path) { - try { - final MessageDigest digest = MessageDigest.getInstance("MD5"); - final byte[] bytes = digest.digest(path.getBytes("UTF-8")); - int i = 0; - return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16) - | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF); - } catch (final UnsupportedEncodingException ex) { - throw new ElasticsearchException("cannot calculate hashcode", ex); - } catch (final NoSuchAlgorithmException ex) { - throw new ElasticsearchException("cannot calculate hashcode", ex); - } - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java deleted file mode 100644 index 828d8ef850462..0000000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import java.util.IdentityHashMap; - -import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.common.settings.Settings; - -public class TestAwsS3Service extends S3Service { - public static class TestPlugin extends S3RepositoryPlugin { - public TestPlugin(Settings settings) { - super(settings, new TestAwsS3Service(settings)); - } - } - - IdentityHashMap clients = new IdentityHashMap<>(); - - public TestAwsS3Service(Settings settings) { - super(settings); - } - - @Override - public synchronized AmazonS3Reference client(String clientName) { - return new AmazonS3Reference(cachedWrapper(super.client(clientName))); - } - - private AmazonS3 cachedWrapper(AmazonS3Reference clientReference) { - TestAmazonS3 wrapper = clients.get(clientReference); - if (wrapper == null) { - wrapper = new TestAmazonS3(clientReference.client(), settings); - clients.put(clientReference, wrapper); - } - return wrapper; - } - - @Override - protected synchronized void releaseCachedClients() { - super.releaseCachedClients(); - clients.clear(); - } - -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml index 604be9ec99e00..6e799c2bfc500 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/suggest/40_typed_keys.yml @@ -34,8 +34,10 @@ setup: --- "Test typed keys parameter for suggesters": - skip: - version: " - 6.99.99" - reason: querying a context suggester with no context was deprecated in 7.0 +# version: " - 6.99.99" +# reason: querying a context suggester with no context was deprecated in 7.0 + version: "all" + reason: "Awaiting a fix: https://github.com/elastic/elasticsearch/issues/31698" features: "warnings" - do: diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 93641574bde12..19572a6c212a2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -325,6 +325,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endArray(); builder.field("ignore_unavailable", ignoreUnavailable()); builder.field("allow_no_indices", allowNoIndices()); + builder.field("forbid_aliases_to_multiple_indices", allowAliasesToMultipleIndices() == false); + builder.field("forbid_closed_indices", forbidClosedIndices()); + builder.field("ignore_aliases", ignoreAliases()); return builder; }
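A note on the hunk above: the three new fields mirror the existing getters, with the first one inverted. As a sketch, assuming IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED (multi-index aliases allowed, closed indices forbidden, aliases not ignored), the extra XContent would render roughly as:

    "forbid_aliases_to_multiple_indices" : false,
    "forbid_closed_indices" : true,
    "ignore_aliases" : false

These values are illustrative, not captured output from the PR.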
diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index 9db5bfd84b5e3..e861a9f8d3f6c 100644 ---
a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -59,23 +59,19 @@ public final Task execute(Request request, ActionListener listener) { * this method. */ Task task = taskManager.register("transport", actionName, request); - if (task == null) { - execute(null, request, listener); - } else { - execute(task, request, new ActionListener() { - @Override - public void onResponse(Response response) { - taskManager.unregister(task); - listener.onResponse(response); - } + execute(task, request, new ActionListener() { + @Override + public void onResponse(Response response) { + taskManager.unregister(task); + listener.onResponse(response); + } - @Override - public void onFailure(Exception e) { - taskManager.unregister(task); - listener.onFailure(e); - } - }); - } + @Override + public void onFailure(Exception e) { + taskManager.unregister(task); + listener.onFailure(e); + } + }); return task; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 7f3906ff5a251..94edb5a297afa 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -1017,6 +1017,18 @@ public static Setting simpleString(String key, Validator validat return new Setting<>(new SimpleKey(key), null, s -> "", Function.identity(), validator, properties); } + /** + * Creates a new Setting instance with a String value + * + * @param key the settings key for this setting. + * @param defaultValue the default String value. + * @param properties properties for this setting like scope, filtering... + * @return the Setting Object + */ + public static Setting simpleString(String key, String defaultValue, Property... 
properties) { + return new Setting<>(key, s -> defaultValue, Function.identity(), properties); + } +
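A quick usage sketch of the overload added above (the setting key and values are invented for illustration; only the simpleString(key, defaultValue, properties) signature comes from this change):

    // Hypothetical node-scoped setting that falls back to "standard" when unset.
    Setting<String> strategy = Setting.simpleString("example.merge.strategy", "standard", Property.NodeScope);
    String implicit = strategy.get(Settings.EMPTY);       // "standard", the supplied default
    String explicit = strategy.get(Settings.builder()     // "eager", an explicit value wins
        .put("example.merge.strategy", "eager").build());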
public static int parseInt(String s, int minValue, String key) { return parseInt(s, minValue, Integer.MAX_VALUE, key); } diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 275bc432942d3..297c12744cc26 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -389,19 +389,6 @@ public BlobStoreIndexShardSnapshot(String snapshot, long indexVersion, List= 0) { final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2); - if (snapshotsBlobContainer.blobExists(oldSnapshotIndexFile)) { - snapshotsBlobContainer.deleteBlob(oldSnapshotIndexFile); - } + snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(oldSnapshotIndexFile); } // write the current generation to the index-latest file @@ -679,9 +677,7 @@ protected void writeIndexGen(final RepositoryData repositoryData, final long rep bStream.writeLong(newGen); genBytes = bStream.bytes(); } - if (snapshotsBlobContainer.blobExists(INDEX_LATEST_BLOB)) { - snapshotsBlobContainer.deleteBlob(INDEX_LATEST_BLOB); - } + snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(INDEX_LATEST_BLOB); logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen); writeAtomic(INDEX_LATEST_BLOB, genBytes); } @@ -702,9 +698,7 @@ void writeIncompatibleSnapshots(RepositoryData repositoryData) throws IOExceptio } bytes = bStream.bytes(); } - if (snapshotsBlobContainer.blobExists(INCOMPATIBLE_SNAPSHOTS_BLOB)) { - snapshotsBlobContainer.deleteBlob(INCOMPATIBLE_SNAPSHOTS_BLOB); - } + snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(INCOMPATIBLE_SNAPSHOTS_BLOB); // write the incompatible snapshots blob writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes); } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java b/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java index 86ba59ebcc804..0b9dfba3e834f 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java @@ -45,9 +45,6 @@ default void setParentTask(String parentTaskNode, long parentTaskId) { /** * Returns the task object that should be used to keep track of the processing of the request. - * - * A request can override this method and return null to avoid being tracked by the task - * manager.
*/ default Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { return new Task(id, type, action, getDescription(), parentTaskId, headers); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 80427b197239d..73af26cfc708b 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -45,6 +45,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; @@ -91,8 +92,6 @@ public void setTaskResultsService(TaskResultsService taskResultsService) { /** * Registers a task without parent task - *
<p>
- * Returns the task manager tracked task or null if the task doesn't support the task manager */ public Task register(String type, String action, TaskAwareRequest request) { Map headers = new HashMap<>(); @@ -110,9 +109,7 @@ public Task register(String type, String action, TaskAwareRequest request) { } } Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask(), headers); - if (task == null) { - return null; - } + Objects.requireNonNull(task); assert task.getParentTaskId().equals(request.getParentTask()) : "Request [ " + request + "] didn't preserve its parentTaskId"; if (logger.isTraceEnabled()) { logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription()); } diff --git a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 4e09daf9ccf0a..7887dd2c7caca 100644 --- a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -58,17 +58,13 @@ public Request newRequest(StreamInput in) throws IOException { public void processMessageReceived(Request request, TransportChannel channel) throws Exception { final Task task = taskManager.register(channel.getChannelType(), action, request); - if (task == null) { - handler.messageReceived(request, channel, null); - } else { - boolean success = false; - try { - handler.messageReceived(request, new TaskTransportChannel(taskManager, task, channel), task); - success = true; - } finally { - if (success == false) { - taskManager.unregister(task); - } + boolean success = false; + try { + handler.messageReceived(request, new TaskTransportChannel(taskManager, task, channel), task); + success = true; + } finally { + if (success == false) { + taskManager.unregister(task); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 9175bc69bf642..edc79db79422d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -79,7 +79,6 @@ public class TransportTasksActionTests extends TaskManagerTestCase { public static class NodeRequest extends BaseNodeRequest { protected String requestName; - private boolean enableTaskManager; public NodeRequest() { super(); @@ -88,82 +87,63 @@ public NodeRequest() { public NodeRequest(NodesRequest request, String nodeId) { super(nodeId); requestName = request.requestName; - enableTaskManager = request.enableTaskManager; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); requestName = in.readString(); - enableTaskManager = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(requestName); - out.writeBoolean(enableTaskManager); } @Override public String getDescription() { - return "CancellableNodeRequest[" + requestName + ", " + enableTaskManager + "]"; + return "CancellableNodeRequest[" + requestName + "]"; } @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - if (enableTaskManager) { - return super.createTask(id, type, action, parentTaskId, headers); -
} else { - return null; - } + return super.createTask(id, type, action, parentTaskId, headers); } } public static class NodesRequest extends BaseNodesRequest { private String requestName; - private boolean enableTaskManager; NodesRequest() { super(); } public NodesRequest(String requestName, String... nodesIds) { - this(requestName, true, nodesIds); - } - - public NodesRequest(String requestName, boolean enableTaskManager, String... nodesIds) { super(nodesIds); this.requestName = requestName; - this.enableTaskManager = enableTaskManager; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); requestName = in.readString(); - enableTaskManager = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(requestName); - out.writeBoolean(enableTaskManager); } @Override public String getDescription() { - return "CancellableNodesRequest[" + requestName + ", " + enableTaskManager + "]"; + return "CancellableNodesRequest[" + requestName + "]"; } @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - if (enableTaskManager) { - return super.createTask(id, type, action, parentTaskId, headers); - } else { - return null; - } + return super.createTask(id, type, action, parentTaskId, headers); } } @@ -400,7 +380,7 @@ public void onFailure(Exception e) { assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { assertEquals(1, entry.getValue().size()); - assertEquals("CancellableNodeRequest[Test Request, true]", entry.getValue().get(0).getDescription()); + assertEquals("CancellableNodeRequest[Test Request]", entry.getValue().get(0).getDescription()); } // Make sure that the main task on coordinating node is the task that was returned to us by execute() @@ -455,27 +435,6 @@ public void testFindChildTasks() throws Exception { assertEquals(0, responses.failureCount()); } - public void testTaskManagementOptOut() throws Exception { - setupTestNodes(Settings.EMPTY); - connectNodes(testNodes); - CountDownLatch checkLatch = new CountDownLatch(1); - // Starting actions that disable task manager - ActionFuture future = startBlockingTestNodesAction(checkLatch, new NodesRequest("Test Request", false)); - - TestNode testNode = testNodes[randomIntBetween(0, testNodes.length - 1)]; - - // Get the parent task - ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.setActions("testAction*"); - ListTasksResponse response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); - assertEquals(0, response.getTasks().size()); - - // Release all tasks and wait for response - checkLatch.countDown(); - NodesResponse responses = future.get(); - assertEquals(0, responses.failureCount()); - } - public void testTasksDescriptions() throws Exception { long minimalStartTime = System.currentTimeMillis(); setupTestNodes(Settings.EMPTY); @@ -502,7 +461,7 @@ public void testTasksDescriptions() throws Exception { assertEquals(testNodes.length, response.getPerNodeTasks().size()); for (Map.Entry> entry : response.getPerNodeTasks().entrySet()) { assertEquals(1, entry.getValue().size()); - assertEquals("CancellableNodeRequest[Test Request, true]", entry.getValue().get(0).getDescription()); + assertEquals("CancellableNodeRequest[Test Request]", entry.getValue().get(0).getDescription()); assertThat(entry.getValue().get(0).getStartTime(), 
greaterThanOrEqualTo(minimalStartTime)); assertThat(entry.getValue().get(0).getRunningTimeNanos(), greaterThanOrEqualTo(minimalDurationNanos)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 9e484217870eb..1bde8ab572b72 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -20,8 +20,11 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.IndicesOptions.Option; +import org.elasticsearch.action.support.IndicesOptions.WildcardStates; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent.MapParams; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -30,6 +33,10 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,14 +44,13 @@ public class CreateSnapshotRequestTests extends ESTestCase { // tests creating XContent and parsing with source(Map) equivalency - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31625") public void testToXContent() throws IOException { String repo = randomAlphaOfLength(5); String snap = randomAlphaOfLength(10); CreateSnapshotRequest original = new CreateSnapshotRequest(repo, snap); - if (randomBoolean()) { // replace + if (randomBoolean()) { List indices = new ArrayList<>(); int count = randomInt(3) + 1; @@ -55,11 +61,11 @@ public void testToXContent() throws IOException { original.indices(indices); } - if (randomBoolean()) { // replace + if (randomBoolean()) { original.partial(randomBoolean()); } - if (randomBoolean()) { // replace + if (randomBoolean()) { Map settings = new HashMap<>(); int count = randomInt(3) + 1; @@ -67,32 +73,31 @@ public void testToXContent() throws IOException { settings.put(randomAlphaOfLength(randomInt(3) + 2), randomAlphaOfLength(randomInt(3) + 2)); } + original.settings(settings); } - if (randomBoolean()) { // replace + if (randomBoolean()) { original.includeGlobalState(randomBoolean()); } - if (randomBoolean()) { // replace - IndicesOptions[] indicesOptions = new IndicesOptions[] { - IndicesOptions.STRICT_EXPAND_OPEN, - IndicesOptions.STRICT_EXPAND_OPEN_CLOSED, - IndicesOptions.LENIENT_EXPAND_OPEN, - IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED, - IndicesOptions.STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED}; + if (randomBoolean()) { + Collection wildcardStates = randomSubsetOf(Arrays.asList(WildcardStates.values())); + Collection