From 4cb219088ee9cfb9fc5986b790dbccf872e01a71 Mon Sep 17 00:00:00 2001
From: Yogesh Gaikwad <902768+bizybot@users.noreply.github.com>
Date: Wed, 10 Jul 2019 11:19:40 +1000
Subject: [PATCH] fix and enable repository-hdfs secure tests (#44044)

Due to the recent changes made to convert `repository-hdfs` to test
clusters (#41252), the `integTestSecure*` tasks did not depend on
`secureHdfsFixture`, so running them would fail because the fixture was
not available. This commit adds the fixture as a dependency of those
tasks.

The `secureHdfsFixture` is an `AntFixture` which is spawned as a
process. Internally it waits up to 30 seconds for its resources to
become available. On my local machine it took almost 45 seconds, so I
have added the wait time as an input to `AntFixture` (defaulting to 30
seconds) and set it to 60 seconds for the secure HDFS fixture.

The integ tests for secure HDFS had been disabled for a long time, so
the fixes from #42090 are also applied in this commit.
---
 .../gradle/test/AntFixture.groovy           |  5 ++-
 plugins/repository-hdfs/build.gradle        | 31 +++++++++++++--
 .../hdfs/HaHdfsFailoverTestSuiteIT.java     | 39 ++++++++++++-------
 .../src/main/java/hdfs/MiniHDFS.java        |  4 +-
 4 files changed, 59 insertions(+), 20 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy
index 8dcb862064ec9..32aca9e580839 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy
@@ -58,6 +58,9 @@ public class AntFixture extends AntTask implements Fixture {
     @Input
     boolean useShell = false
 
+    @Input
+    int maxWaitInSeconds = 30
+
     /**
      * A flag to indicate whether the fixture should be run in the foreground, or spawned.
      * It is protected so subclasses can override (eg RunTask).
@@ -128,7 +131,7 @@ public class AntFixture extends AntTask implements Fixture {
         String failedProp = "failed${name}"
         // first wait for resources, or the failure marker from the wrapper script
-        ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
+        ant.waitfor(maxwait: maxWaitInSeconds, maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
             or {
                 resourceexists {
                     file(file: failureMarker.toString())
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index e2d388252b0c8..258934e944e89 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -91,13 +91,13 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture
   executable = new File(project.runtimeJavaHome, 'bin/java')
   env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
+  maxWaitInSeconds 60
   onlyIf { project(':test:fixtures:krb5kdc-fixture').buildFixture.enabled }
   waitCondition = { fixture, ant ->
     // the hdfs.MiniHDFS fixture writes the ports file when
     // it's ready, so we can just wait for the file to exist
     return fixture.portsFile.exists()
   }
-
   final List miniHDFSArgs = []
 
   // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options
@@ -130,7 +130,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
   }
 }
 
-Set disabledIntegTestTaskNames = ['integTestSecure', 'integTestSecureHa']
+Set disabledIntegTestTaskNames = []
 
 for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
   task "${integTestTaskName}"(type: RestIntegTestTask) {
@@ -141,10 +141,35 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec
       enabled = false;
     }
 
+    if (integTestTaskName.contains("Secure")) {
+      if (integTestTaskName.contains("Ha")) {
+        dependsOn secureHaHdfsFixture
+      } else {
+        dependsOn secureHdfsFixture
+      }
+    }
+
     runner {
+      if (integTestTaskName.contains("Ha")) {
+        if (integTestTaskName.contains("Secure")) {
+          Path path = buildDir.toPath()
+              .resolve("fixtures")
+              .resolve("secureHaHdfsFixture")
+              .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        } else {
+          Path path = buildDir.toPath()
+              .resolve("fixtures")
+              .resolve("haHdfsFixture")
+              .resolve("ports")
+          nonInputProperties.systemProperty "test.hdfs-fixture.ports", path
+          classpath += files(path)
+        }
+      }
+
       if (integTestTaskName.contains("Secure")) {
         if (disabledIntegTestTaskNames.contains(integTestTaskName) == false) {
-          dependsOn secureHdfsFixture
           nonInputProperties.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
           nonInputProperties.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
           jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java
index 0248576b57384..e29cd14befdb7 100644
--- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java
@@ -19,16 +19,6 @@
 
 package org.elasticsearch.repositories.hdfs;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.security.AccessController;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -46,6 +36,16 @@
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.Assert;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Integration test that runs against an HA-Enabled HDFS instance
  */
@@ -57,13 +57,24 @@ public void testHAFailoverWithRepository() throws Exception {
         String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
         String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
         String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
+        String ports = System.getProperty("test.hdfs-fixture.ports");
+        String nn1Port = "10001";
+        String nn2Port = "10002";
+        if (ports.length() > 0) {
+            final Path path = PathUtils.get(ports);
+            final List<String> lines = AccessController.doPrivileged((PrivilegedExceptionAction<List<String>>) () -> {
+                return Files.readAllLines(path);
+            });
+            nn1Port = lines.get(0);
+            nn2Port = lines.get(1);
+        }
         boolean securityEnabled = hdfsKerberosPrincipal != null;
 
         Configuration hdfsConfiguration = new Configuration();
         hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
         hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
-        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port);
+        hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port);
         hdfsConfiguration.set(
             "dfs.client.failover.proxy.provider.ha-hdfs",
             "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
@@ -110,8 +121,8 @@ public void testHAFailoverWithRepository() throws Exception {
             securityCredentials(securityEnabled, esKerberosPrincipal) +
             "\"conf.dfs.nameservices\": \"ha-hdfs\"," +
             "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\"," +
-            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\"," +
-            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\"," +
+            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:" + nn1Port + "\"," +
+            "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:" + nn2Port + "\"," +
             "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": " +
                 "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" +
             "}" +
diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
index b060d78b92b69..3bd6233225976 100644
--- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
+++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
@@ -109,8 +109,8 @@ public static void main(String[] args) throws Exception {
         String haNameService = System.getProperty("ha-nameservice");
         boolean haEnabled = haNameService != null;
         if (haEnabled) {
-            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001);
-            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002);
+            MiniDFSNNTopology.NNConf nn1 = new MiniDFSNNTopology.NNConf("nn1").setIpcPort(0);
+            MiniDFSNNTopology.NNConf nn2 = new MiniDFSNNTopology.NNConf("nn2").setIpcPort(0);
             MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf(haNameService).addNN(nn1).addNN(nn2);
             MiniDFSNNTopology namenodeTopology = new MiniDFSNNTopology().addNameservice(nameservice);
             builder.nnTopology(namenodeTopology);
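
Reviewer note on the ports-file handshake this patch introduces: MiniHDFS now
binds its namenodes to ephemeral ports (`setIpcPort(0)`) and writes the actual
ports to the fixture's `ports` file, which the test reads back via the
`test.hdfs-fixture.ports` system property. A minimal, self-contained sketch of
the reading side follows; the class name `FixturePorts` and the helper
`readNameNodePorts` are hypothetical, for illustration only (the real logic
lives in `HaHdfsFailoverTestSuiteIT` above, where it additionally runs inside
`AccessController.doPrivileged` because the test executes under a security
manager).

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

// Hypothetical stand-alone sketch of reading the fixture's ports file.
public final class FixturePorts {

    // Reads the namenode ports written by the MiniHDFS fixture, falling
    // back to the previously hard-coded ports when the property is unset.
    static String[] readNameNodePorts() throws IOException {
        String ports = System.getProperty("test.hdfs-fixture.ports", "");
        String nn1Port = "10001"; // old fixed default for nn1
        String nn2Port = "10002"; // old fixed default for nn2
        if (ports.length() > 0) {
            Path path = Paths.get(ports);
            List<String> lines = Files.readAllLines(path);
            nn1Port = lines.get(0); // first line: nn1's ipc port
            nn2Port = lines.get(1); // second line: nn2's ipc port
        }
        return new String[] { nn1Port, nn2Port };
    }

    public static void main(String[] args) throws IOException {
        String[] nnPorts = readNameNodePorts();
        System.out.println("dfs.namenode.rpc-address.ha-hdfs.nn1 = localhost:" + nnPorts[0]);
        System.out.println("dfs.namenode.rpc-address.ha-hdfs.nn2 = localhost:" + nnPorts[1]);
    }
}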