Dry up Snapshot Integ Tests some More (#62856) (#63248)
* Just some obvious drying up of these super complex tests.
* Mainly just shortening the diff of #61839 here by moving test utilities
to the abstract test case.
Also, this makes use of the now-available functionality to simplify existing tests
and improve their logging.
original-brownbear authored Oct 5, 2020
1 parent a522e93 commit de6eeec
Showing 9 changed files with 309 additions and 476 deletions.
@@ -31,7 +31,6 @@
 
 import java.io.ByteArrayInputStream;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows;
 import static org.hamcrest.Matchers.is;
@@ -49,11 +48,8 @@ public void testMasterFailoverDuringCleanup() throws Exception {
         ensureStableCluster(nodeCount - 1);
 
         logger.info("--> wait for cleanup to finish and disappear from cluster state");
-        assertBusy(() -> {
-            RepositoryCleanupInProgress cleanupInProgress =
-                client().admin().cluster().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE);
-            assertFalse(cleanupInProgress.hasCleanupInProgress());
-        }, 30, TimeUnit.SECONDS);
+        awaitClusterState(state ->
+            state.custom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY).hasCleanupInProgress() == false);
     }
 
     public void testRepeatCleanupsDontRemove() throws Exception {
@@ -71,11 +67,8 @@ public void testRepeatCleanupsDontRemove() throws Exception {
         unblockNode("test-repo", masterNode);
 
         logger.info("--> wait for cleanup to finish and disappear from cluster state");
-        assertBusy(() -> {
-            RepositoryCleanupInProgress cleanupInProgress =
-                client().admin().cluster().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE);
-            assertFalse(cleanupInProgress.hasCleanupInProgress());
-        }, 30, TimeUnit.SECONDS);
+        awaitClusterState(state ->
+            state.custom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY).hasCleanupInProgress() == false);
     }
 
     private String startBlockedCleanup(String repoName) throws Exception {
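The awaitClusterState helper used above comes from the abstract test case the commit message mentions, whose diff is not rendered on this page. As a rough, hedged sketch of what such a helper could look like, built from the assertBusy polling pattern the hunks above delete (the class name and method body are illustrative, not the commit's actual code):

import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.test.ESIntegTestCase;

public abstract class SnapshotIntegTestCaseSketch extends ESIntegTestCase {

    // Waits up to 30 seconds for the cluster state to satisfy the given predicate,
    // mirroring the assertBusy + prepareState() polling the tests used to inline.
    protected void awaitClusterState(Predicate<ClusterState> statePredicate) throws Exception {
        assertBusy(() -> assertTrue(statePredicate.test(
            client().admin().cluster().prepareState().get().getState())), 30L, TimeUnit.SECONDS);
    }
}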

Large diffs are not rendered by default.

@@ -27,10 +27,8 @@
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -55,6 +53,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -120,8 +119,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception {
             .put("compress", false)
             .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
 
-        logger.info("--> delete snapshot");
-        client.admin().cluster().prepareDeleteSnapshot(repoName, snapshot).get();
+        startDeleteSnapshot(repoName, snapshot).get();
 
         logger.info("--> make sure snapshot doesn't exist");
         expectThrows(SnapshotMissingException.class, () -> client.admin().cluster().prepareGetSnapshots(repoName)
@@ -211,29 +209,10 @@ public void testFindDanglingLatestGeneration() throws Exception {
         Files.move(repo.resolve("index-" + beforeMoveGen), repo.resolve("index-" + (beforeMoveGen + 1)));
 
         logger.info("--> set next generation as pending in the cluster state");
-        final PlainActionFuture<Void> csUpdateFuture = PlainActionFuture.newFuture();
-        internalCluster().getCurrentMasterNodeInstance(ClusterService.class).submitStateUpdateTask("set pending generation",
-            new ClusterStateUpdateTask() {
-                @Override
-                public ClusterState execute(ClusterState currentState) {
-                    return ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata())
-                        .putCustom(RepositoriesMetadata.TYPE,
-                            currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(
-                                repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1)).build()).build();
-                }
-
-                @Override
-                public void onFailure(String source, Exception e) {
-                    csUpdateFuture.onFailure(e);
-                }
-
-                @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    csUpdateFuture.onResponse(null);
-                }
-            }
-        );
-        csUpdateFuture.get();
+        updateClusterState(currentState -> ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata())
+            .putCustom(RepositoriesMetadata.TYPE,
+                currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(
+                    repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1)).build()).build());
 
         logger.info("--> full cluster restart");
         internalCluster().fullRestart();
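The updateClusterState(...) call above replaces the ClusterStateUpdateTask boilerplate deleted in the same hunk. A hedged sketch of such a helper, written directly from that removed code (the class name is a placeholder; the real helper lives in the unrendered base-class diff and may differ):

import java.util.function.Function;

import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.test.ESIntegTestCase;

public abstract class SnapshotIntegTestCaseSketch extends ESIntegTestCase {

    // Applies the given transformation to the current cluster state on the elected master
    // and blocks until the resulting state has been processed, propagating any failure.
    protected void updateClusterState(Function<ClusterState, ClusterState> updater) throws Exception {
        final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
        internalCluster().getCurrentMasterNodeInstance(ClusterService.class)
            .submitStateUpdateTask("test cluster state update", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    return updater.apply(currentState);
                }

                @Override
                public void onFailure(String source, Exception e) {
                    future.onFailure(e);
                }

                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    future.onResponse(null);
                }
            });
        future.get();
    }
}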
@@ -242,8 +221,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
         logger.info("--> verify index-N blob is found at the new location");
         assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 1));
 
-        logger.info("--> delete snapshot");
-        client().admin().cluster().prepareDeleteSnapshot(repoName, snapshot).get();
+        startDeleteSnapshot(repoName, snapshot).get();
 
         logger.info("--> verify index-N blob is found at the expected location");
         assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 2));
@@ -303,7 +281,7 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception {
             is(SnapshotsService.OLD_SNAPSHOT_FORMAT));
 
         logger.info("--> verify that snapshot with missing root level metadata can be deleted");
-        assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get());
+        assertAcked(startDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get());
 
         logger.info("--> verify that repository is assumed in new metadata format after removing corrupted snapshot");
         assertThat(PlainActionFuture.get(f -> threadPool.generic().execute(
@@ -353,7 +331,7 @@ public void testMountCorruptedRepositoryData() throws Exception {
         expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo));
     }
 
-    public void testHandleSnapshotErrorWithBwCFormat() throws IOException {
+    public void testHandleSnapshotErrorWithBwCFormat() throws IOException, ExecutionException, InterruptedException {
         final String repoName = "test-repo";
         final Path repoPath = randomRepoPath();
         createRepository(repoName, "fs", repoPath);
@@ -377,13 +355,12 @@ public void testHandleSnapshotErrorWithBwCFormat() throws IOException {
         assertFileExists(initialShardMetaPath);
         Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1"));
 
-        logger.info("--> delete old version snapshot");
-        client().admin().cluster().prepareDeleteSnapshot(repoName, oldVersionSnapshot).get();
+        startDeleteSnapshot(repoName, oldVersionSnapshot).get();
 
         createFullSnapshot(repoName, "snapshot-2");
     }
 
-    public void testRepairBrokenShardGenerations() throws IOException {
+    public void testRepairBrokenShardGenerations() throws Exception {
         final String repoName = "test-repo";
         final Path repoPath = randomRepoPath();
         createRepository(repoName, "fs", repoPath);
@@ -398,8 +375,7 @@ public void testRepairBrokenShardGenerations() throws IOException {
 
         createFullSnapshot(repoName, "snapshot-1");
 
-        logger.info("--> delete old version snapshot");
-        client().admin().cluster().prepareDeleteSnapshot(repoName, oldVersionSnapshot).get();
+        startDeleteSnapshot(repoName, oldVersionSnapshot).get();
 
         logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation");
         final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName);
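Throughout the hunks above, inlined "delete snapshot" log lines plus prepareDeleteSnapshot(...).get() calls are collapsed into startDeleteSnapshot(...). A hedged sketch of what that helper might look like, assuming it simply kicks off the delete request and returns its future so callers can block on it or assert acknowledgement (the class name and log message are illustrative, not the commit's actual code):

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.test.ESIntegTestCase;

public abstract class SnapshotIntegTestCaseSketch extends ESIntegTestCase {

    // Starts a snapshot deletion and returns the future, keeping the log line in one
    // shared place so individual tests no longer need to repeat it.
    protected ActionFuture<AcknowledgedResponse> startDeleteSnapshot(String repoName, String snapshotName) {
        logger.info("--> deleting snapshot [{}] from repository [{}]", snapshotName, repoName);
        return client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute();
    }
}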