diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy
index 8dcb862064ec9..32aca9e580839 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy
@@ -58,6 +58,9 @@ public class AntFixture extends AntTask implements Fixture {
     @Input
     boolean useShell = false
 
+    @Input
+    int maxWaitInSeconds = 30
+
     /**
      * A flag to indicate whether the fixture should be run in the foreground, or spawned.
      * It is protected so subclasses can override (eg RunTask).
@@ -128,7 +131,7 @@ public class AntFixture extends AntTask implements Fixture {
             String failedProp = "failed${name}"
 
             // first wait for resources, or the failure marker from the wrapper script
-            ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
+            ant.waitfor(maxwait: maxWaitInSeconds, maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
                 or {
                     resourceexists {
                         file(file: failureMarker.toString())
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 035776ea7918e..dfe3d1b145a53 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -81,13 +81,13 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
     dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
     executable = new File(project.runtimeJavaHome, 'bin/java')
     env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
+    maxWaitInSeconds 60
     onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled }
     waitCondition = { fixture, ant ->
       // the hdfs.MiniHDFS fixture writes the ports file when
       // it's ready, so we can just wait for the file to exist
       return fixture.portsFile.exists()
     }
-
     final List miniHDFSArgs = []
 
     // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
@@ -118,7 +118,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   }
 }
 
-Set disabledIntegTestTaskNames = ['integTestSecure', 'integTestSecureHa']
+Set disabledIntegTestTaskNames = []
 
 for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
   task "${integTestTaskName}"(type: RestIntegTestTask) {
@@ -129,10 +129,35 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec
       enabled = false;
     }
 
+    if (integTestTaskName.contains("Secure")) {
+      if (integTestTaskName.contains("Ha")) {
+        dependsOn secureHaHdfsFixture
+      } else {
+        dependsOn secureHdfsFixture
+      }
+    }
+
     runner {
+      if (integTestTaskName.contains("Ha")) {
+        if (integTestTaskName.contains("Secure")) {
+          Path path = buildDir.toPath()
+            .resolve("fixtures")
+            .resolve("secureHaHdfsFixture")
+            .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        } else {
+          Path path = buildDir.toPath()
+            .resolve("fixtures")
+            .resolve("haHdfsFixture")
+            .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        }
+      }
+
       if (integTestTaskName.contains("Secure")) {
         if (disabledIntegTestTaskNames.contains(integTestTaskName) == false) {
-          dependsOn secureHdfsFixture
           nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
           nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
           jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
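
The build.gradle changes above replace hard-coded name node ports with a handshake: each fixture writes the ports it actually bound into a file under build/fixtures/<fixtureName>/ports, and the runner hands that file's path to the tests as the test.hdfs-fixture.ports system property. The mechanism underneath is plain ephemeral-port binding: binding to port 0 lets the OS pick a free port, which is only knowable after the bind and therefore has to be published somewhere for clients. A minimal, self-contained Java sketch of that publish pattern (the class name and file location are illustrative, not taken from this patch):

import java.io.IOException;
import java.net.ServerSocket;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

public class EphemeralPortsDemo {
    public static void main(String[] args) throws IOException {
        // Binding to port 0 asks the OS for any free ephemeral port;
        // the real port is only known once the bind has happened.
        try (ServerSocket nn1 = new ServerSocket(0);
             ServerSocket nn2 = new ServerSocket(0)) {
            // Publish the bound ports, one per line, for clients to read.
            Path portsFile = Paths.get("build", "fixtures", "demo", "ports");
            Files.createDirectories(portsFile.getParent());
            Files.write(portsFile, Arrays.asList(
                    Integer.toString(nn1.getLocalPort()),
                    Integer.toString(nn2.getLocalPort())), StandardCharsets.UTF_8);
            System.out.println("nn1=" + nn1.getLocalPort() + " nn2=" + nn2.getLocalPort());
        }
    }
}

Because the ports are allocated at bind time instead of being fixed at 10001/10002, concurrent builds on the same machine no longer race for the same ports, which is presumably why the previously disabled secure integration tests can be re-enabled.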
"elasticsearch@${realm}" nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}" jvmArgs "-Djava.security.krb5.conf=${krb5conf}" diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index 0248576b57384..e29cd14befdb7 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -19,16 +19,6 @@ package org.elasticsearch.repositories.hdfs; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.BadFencingConfigurationException; import org.apache.hadoop.ha.HAServiceProtocol; @@ -46,6 +36,16 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.Assert; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; + /** * Integration test that runs against an HA-Enabled HDFS instance */ @@ -57,13 +57,24 @@ public void testHAFailoverWithRepository() throws Exception { String esKerberosPrincipal = System.getProperty("test.krb5.principal.es"); String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs"); String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs"); + String ports = System.getProperty("test.hdfs-fixture.ports"); + String nn1Port = "10001"; + String nn2Port = "10002"; + if (ports.length() > 0) { + final Path path = PathUtils.get(ports); + final List lines = AccessController.doPrivileged((PrivilegedExceptionAction>) () -> { + return Files.readAllLines(path); + }); + nn1Port = lines.get(0); + nn2Port = lines.get(1); + } boolean securityEnabled = hdfsKerberosPrincipal != null; Configuration hdfsConfiguration = new Configuration(); hdfsConfiguration.set("dfs.nameservices", "ha-hdfs"); hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2"); - hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001"); - hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002"); + hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port); + hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port); hdfsConfiguration.set( "dfs.client.failover.proxy.provider.ha-hdfs", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider" @@ -110,8 +121,8 @@ public void testHAFailoverWithRepository() throws Exception { securityCredentials(securityEnabled, esKerberosPrincipal) + "\"conf.dfs.nameservices\": \"ha-hdfs\"," + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," + - "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\"," + - "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\"," + + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:"+nn1Port+"\"," + + 
"\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:"+nn2Port+"\"," + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + "}" + diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java index b060d78b92b69..3bd6233225976 100644 --- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java @@ -109,8 +109,8 @@ public static void main(String[] args) throws Exception { String haNameService = System.getProperty("ha-nameservice"); boolean haEnabled = haNameService != null; if (haEnabled) { - MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001); - MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002); + MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0); + MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0); MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2); MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice); builder.nnTopology(namenodeTopology);