Fixing Additional Tests, there might still be some flaky ones
Signed-off-by: Dharmesh 💤 <sdharms@amazon.com>
psychbot authored and Sachin Kale committed Aug 31, 2023
1 parent d0c2b5b commit 6919141
Showing 7 changed files with 69 additions and 50 deletions.
@@ -15,14 +15,18 @@
import org.opensearch.index.IndexSettings;
import org.opensearch.indices.recovery.IndexRecoveryIT;
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.repositories.fs.FsRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Before;

import java.nio.file.Path;
import java.util.Locale;

import static org.opensearch.action.admin.cluster.remotestore.RemoteStoreNode.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
import static org.opensearch.action.admin.cluster.remotestore.RemoteStoreNode.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
import static org.opensearch.action.admin.cluster.remotestore.RemoteStoreNode.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
import static org.opensearch.action.admin.cluster.remotestore.RemoteStoreNode.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;

@@ -31,11 +35,34 @@ public class RemoteIndexRecoveryIT extends IndexRecoveryIT {

protected static final String REPOSITORY_NAME = "test-remote-store-repo";

protected Path absolutePath;

@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build();
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(remoteStoreClusterSettings(REPOSITORY_NAME))
.put(repositoryNodeAttributes(REPOSITORY_NAME, FsRepository.TYPE, randomRepoPath().toAbsolutePath().toString()))
.build();
}

private Settings repositoryNodeAttributes(String name, String type, String location) {
String segmentRepoNameAttributeKey = "node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY;
String translogRepoNameAttributeKey = "node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY;
String typeAttributeKey = String.format(
Locale.getDefault(),
"node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
name
);
String settingsAttributeKey = String.format(
Locale.getDefault(),
"node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
name
);
return Settings.builder()
.put(segmentRepoNameAttributeKey, name)
.put(translogRepoNameAttributeKey, name)
.put(typeAttributeKey, type)
.put(settingsAttributeKey + "location", location)
.build();
}
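// Illustration added for clarity (not part of this change): for a call like
//   repositoryNodeAttributes("my-repo", FsRepository.TYPE, "/tmp/my-repo")
// the returned settings carry node attributes roughly of the shape
//   node.attr.remote_store.segment.repository = my-repo
//   node.attr.remote_store.translog.repository = my-repo
//   node.attr.remote_store.repository.my-repo.type = fs
//   node.attr.remote_store.repository.my-repo.settings.location = /tmp/my-repo
// where the literal key strings are assumed expansions of the RemoteStoreNode constants
// and "my-repo" / "/tmp/my-repo" are placeholder values.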

@Override
@@ -47,17 +74,6 @@ protected Settings featureFlagSettings() {
.build();
}

@Before
@Override
public void setUp() throws Exception {
super.setUp();
internalCluster().startClusterManagerOnlyNode();
absolutePath = randomRepoPath().toAbsolutePath();
assertAcked(
clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath))
);
}

@Override
public Settings indexSettings() {
return Settings.builder()
@@ -271,16 +271,8 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFiel
.build();
}

protected void putRepository(Path path) {
putRepository(path, REPOSITORY_NAME);
}

protected void putRepository(Path path, String repoName) {
assertAcked(clusterAdmin().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder().put("location", path)));
}

@After
public void teardown() {
public void teardown() throws Exception {
nodeAttributesSettings = null;
assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME));
assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME));
@@ -304,7 +296,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
return filesExisting.get();
}

void assertRepositoryMetadataPresentInClusterState() throws Exception {
public void assertRepositoryMetadataPresentInClusterState() throws Exception {
assertBusy(() -> {
RepositoriesMetadata repositoriesMetadata = client().admin()
.cluster()
@@ -16,9 +16,7 @@
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.transport.MockTransportService;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -29,7 +27,7 @@
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 3)
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase {

private static final String INDEX_NAME = "remote-store-test-idx-1";
Expand All @@ -41,11 +39,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(MockTransportService.TestPlugin.class);
}

@Before
public void setup() {
setupRepo();
}

@Override
public Settings indexSettings() {
return remoteStoreIndexSettings(0);
@@ -96,7 +89,10 @@ private void verifyRestoredData(Map<String, Long> indexStats, long deletedDocs)
}

private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlush, boolean flushAfterMerge, long deletedDocs)
throws IOException {
throws Exception {
internalCluster().startNodes(3);
ensureStableCluster(3);
assertRepositoryMetadataPresentInClusterState();
createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
ensureYellowAndNoInitializingShards(INDEX_NAME);
ensureGreen(INDEX_NAME);
Expand Down Expand Up @@ -128,19 +124,19 @@ private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlus
// values for each of the flags, number of integ tests become 16 in comparison to current 2.
// We have run all the 16 tests on local and they run fine.
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9294")
public void testRestoreForceMergeSingleIteration() throws IOException {
public void testRestoreForceMergeSingleIteration() throws Exception {
boolean invokeFLush = randomBoolean();
boolean flushAfterMerge = randomBoolean();
testRestoreWithMergeFlow(1, invokeFLush, flushAfterMerge, randomIntBetween(0, 10));
}

public void testRestoreForceMergeMultipleIterations() throws IOException {
public void testRestoreForceMergeMultipleIterations() throws Exception {
boolean invokeFLush = randomBoolean();
boolean flushAfterMerge = randomBoolean();
testRestoreWithMergeFlow(randomIntBetween(2, 5), invokeFLush, flushAfterMerge, randomIntBetween(0, 10));
}

public void testRestoreForceMergeMultipleIterationsDeleteAll() throws IOException {
public void testRestoreForceMergeMultipleIterationsDeleteAll() throws Exception {
boolean invokeFLush = randomBoolean();
boolean flushAfterMerge = randomBoolean();
testRestoreWithMergeFlow(randomIntBetween(2, 3), invokeFLush, flushAfterMerge, -1);
@@ -14,6 +14,8 @@
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.settings.Settings;
import org.opensearch.plugins.Plugin;
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.repositories.fs.FsRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.transport.MockTransportService;

@@ -47,6 +49,7 @@ private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String na

Settings.Builder settings = Settings.builder();
settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue()));
settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true);

return new RepositoryMetadata(name, type, settings.build());
}
@@ -88,4 +91,11 @@ public void testMultiNodeClusterRepositoryRegistrationWithMultipleMasters() thro
ensureStableCluster(6);
assertRemoteStoreRepositoryOnAllNodes();
}

public void testMultiNodeClusterRepositoryRegistrationDifferent() throws Exception {
internalCluster().startNodes(3);
internalCluster().startNode(remoteStoreNodeAttributes(REPOSITORY_NAME, FsRepository.TYPE, REPOSITORY_NAME, FsRepository.TYPE));
ensureStableCluster(3);
assertRemoteStoreRepositoryOnAllNodes();
}
}
@@ -34,7 +34,7 @@
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.greaterThan;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0)
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase {
private static final String INDEX_NAME = "remote-store-test-idx-1";
private static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2";
@@ -91,12 +91,12 @@ private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, St
throws Exception {
internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes);
internalCluster().startDataOnlyNodes(numDataOnlyNodes);
assertRepositoryMetadataPresentInClusterState();
for (String index : indices.split(",")) {
createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount));
ensureYellowAndNoInitializingShards(index);
ensureGreen(index);
}
assertRepositoryMetadataPresentInClusterState();
}

/**
@@ -38,17 +38,19 @@
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3)
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase {

private static final String INDEX_NAME = "remote-store-test-idx-1";

@Before
public void setup() {
setupRepo();
public void setup() throws Exception {
internalCluster().startNodes(3);
ensureStableCluster(3);
assertRepositoryMetadataPresentInClusterState();
}

public void testStatsResponseFromAllNodes() {
public void testStatsResponseFromAllNodes() throws Exception {

// Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes
// during this time frame. This ensures that the segment upload has started.
@@ -104,7 +106,7 @@ public void testStatsResponseFromAllNodes() {
}
}

public void testStatsResponseAllShards() {
public void testStatsResponseAllShards() throws Exception {

// Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes
// during this time frame. This ensures that the segment upload has started.
@@ -149,7 +151,7 @@ public void testStatsResponseAllShards() {

}

public void testStatsResponseFromLocalNode() {
public void testStatsResponseFromLocalNode() throws Exception {

// Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes
// during this time frame. This ensures that the segment upload has started.
@@ -379,7 +381,7 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr
}
}

public void testStatsOnShardRelocation() {
public void testStatsOnShardRelocation() throws Exception {
// Scenario:
// - Create index with single primary and single replica shard
// - Index documents
@@ -460,7 +462,7 @@ public void testStatsOnShardUnassigned() throws IOException {
indexSingleDoc(INDEX_NAME);
}

public void testStatsOnRemoteStoreRestore() throws IOException {
public void testStatsOnRemoteStoreRestore() throws Exception {
// Creating an index with primary shard count == total nodes in cluster and 0 replicas
int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount));
@@ -475,7 +477,8 @@ public void testStatsOnRemoteStoreRestore() throws IOException {
ensureRed(INDEX_NAME);

// Start another data node to fulfil the previously launched capacity
internalCluster().startDataOnlyNode();
String nodeId = internalCluster().startDataOnlyNode();
ensureStableCluster(3);

// Restore index from remote store
assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
@@ -3142,7 +3142,9 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In

@Override
public void verify(String seed, DiscoveryNode localNode) {
assertSnapshotOrGenericThread();
if (isSystemRepository == false) {
assertSnapshotOrGenericThread();
}
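// Clarifying comment (an inference, not part of the original change): repositories registered
// through remote-store node attributes are marked with SYSTEM_REPOSITORY_SETTING (see the
// RepositoryMetadata built in the test helper earlier in this diff), and their verify() call
// may run outside the snapshot/generic thread pools, so the thread assertion is skipped for them.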
if (isReadOnly()) {
try {
latestIndexBlobId();
