Commit
[Backport 2.11] [Remote Store] Using hash of node id in metadata file names (#10492)

* [Remote Store] Using hash of node id in metadata file names (#10480)

Signed-off-by: Gaurav Bafna <gbbafna@amazon.com>
Signed-off-by: Ashish Singh <ssashish@amazon.com>

* Fix failing testGetPrimaryTermAndGeneration in TranslogTransferManagerTests (#10490)

Signed-off-by: Ashish Singh <ssashish@amazon.com>

---------

Signed-off-by: Gaurav Bafna <gbbafna@amazon.com>
Signed-off-by: Ashish Singh <ssashish@amazon.com>
Co-authored-by: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com>
ashking94 and gbbafna authored Oct 9, 2023
1 parent c5b64ee commit 38df519
Showing 6 changed files with 29 additions and 16 deletions.
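
Why the change matters: OpenSearch node IDs are random URL-safe base64 strings, so an ID can itself contain the `_` character that these metadata filenames use (doubled, as `__`) to separate tokens, which breaks position-based parsing of the name. Replacing the raw ID with `String.valueOf(Objects.hash(nodeId))` yields a token made only of digits and at most a leading minus sign. A minimal sketch of the failure mode and the fix; the `buildName` helper and the sample node ID are illustrative, not part of the commit:

```java
import java.util.Objects;

class NodeIdTokenSketch {
    // Illustrative helper: joins tokens with the same "__" separator the
    // metadata filenames in this commit use.
    static String buildName(String nodeToken) {
        return String.join("__", "metadata", "0000000001", nodeToken, "1");
    }

    public static void main(String[] args) {
        // A URL-safe base64 node ID can legitimately contain underscores:
        String nodeId = "ab__cd-EfGh1";

        // Embedding the raw ID corrupts position-based parsing: splitting on
        // "__" yields 5 tokens instead of the expected 4.
        System.out.println(buildName(nodeId).split("__").length); // 5

        // Objects.hash(nodeId) renders as a decimal int (digits, optional
        // leading '-'), so the token count stays fixed.
        String hashed = String.valueOf(Objects.hash(nodeId));
        System.out.println(buildName(hashed).split("__").length); // 4
    }
}
```

The same reasoning explains the separator-count assertions added to the tests further down.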
@@ -82,7 +82,7 @@ public static String getSegmentName(String filename) {
      * @param fn Function to extract PrimaryTerm_Generation and Node Id from metadata file name .
      *           fn returns null if node id is not part of the file name
      */
-    static public void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
+    public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
         Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
         mdFiles.forEach(mdFile -> {
             Tuple<String, String> nodeIdByPrimaryTermAndGen = fn.apply(mdFile);
@@ -91,10 +91,9 @@ static public void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
                 && (!nodesByPrimaryTermAndGen.get(nodeIdByPrimaryTermAndGen.v1()).equals(nodeIdByPrimaryTermAndGen.v2()))) {
                 throw new IllegalStateException(
                     "Multiple metadata files from different nodes"
                         + "having same primary term and generations "
-                        + nodeIdByPrimaryTermAndGen.v1()
-                        + " and "
-                        + nodeIdByPrimaryTermAndGen.v2()
+                        + "having same primary term and generations detected"
+                        + " detected "
                 );
             }
             nodesByPrimaryTermAndGen.put(nodeIdByPrimaryTermAndGen.v1(), nodeIdByPrimaryTermAndGen.v2());
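
For readers outside the diff context, the guard above maps each `primaryTerm__generation` pair to the node token that wrote it, and throws as soon as a second, different node shows up for the same pair. A self-contained sketch of that logic; the toy filename layout and parser are assumptions for illustration:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class VerifyNoMultipleWritersSketch {
    public static void main(String[] args) {
        // Toy metadata names: metadata__<term>__<gen>__<nodeToken>
        List<String> mdFiles = List.of("metadata__1__5__111", "metadata__1__5__222");

        Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
        for (String mdFile : mdFiles) {
            String[] tokens = mdFile.split("__");
            String primaryTermAndGen = tokens[1] + "__" + tokens[2];
            String nodeToken = tokens[3];

            String previous = nodesByPrimaryTermAndGen.put(primaryTermAndGen, nodeToken);
            if (previous != null && !previous.equals(nodeToken)) {
                // Same term + generation written by two different nodes:
                // this sketch throws on the second entry above.
                throw new IllegalStateException("Multiple metadata files from different nodes detected");
            }
        }
    }
}
```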
@@ -54,6 +54,7 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -332,7 +333,7 @@ public static String getMetadataFilename(
             RemoteStoreUtils.invertLong(generation),
             RemoteStoreUtils.invertLong(translogGeneration),
             RemoteStoreUtils.invertLong(uploadCounter),
-            nodeId,
+            String.valueOf(Objects.hash(nodeId)),
             RemoteStoreUtils.invertLong(System.currentTimeMillis()),
             String.valueOf(metadataVersion)
         );
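
The neighboring tokens come from `RemoteStoreUtils.invertLong`, which encodes a long as the zero-padded value of `Long.MAX_VALUE` minus the input, so plain lexicographic sorting returns newest-first (the test further down recovers primary term 12 from the token `9223372036854775795`). A sketch of the filename shape after this change, with a local `invertLong` assumed to mirror the real helper:

```java
import java.util.Locale;
import java.util.Objects;

class MetadataFilenameSketch {
    // Assumed to mirror RemoteStoreUtils.invertLong: Long.MAX_VALUE - num,
    // zero-padded to 19 digits so string order equals reverse numeric order.
    static String invertLong(long num) {
        return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - num);
    }

    public static void main(String[] args) {
        System.out.println(invertLong(12)); // 9223372036854775795, as in the test below

        // Abbreviated shape of a metadata filename after this change:
        // every token is numeric, so none can contain the '__' separator.
        String name = String.join("__",
            "metadata",
            invertLong(12),                          // primary term
            invertLong(23),                          // generation
            String.valueOf(Objects.hash("node-1")),  // hashed node id
            "1"                                      // metadata version
        );
        System.out.println(name);
    }
}
```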
@@ -100,7 +100,7 @@ public String getFileName() {
                 RemoteStoreUtils.invertLong(primaryTerm),
                 RemoteStoreUtils.invertLong(generation),
                 RemoteStoreUtils.invertLong(createdAt),
-                nodeId,
+                String.valueOf(Objects.hash(nodeId)),
                 String.valueOf(CURRENT_VERSION)
             )
         );
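
Reading a name back, `getNodeIdByPrimaryTermAndGeneration` (exercised in the last test file of this diff) recovers the term, the generation, and the hashed node-id token purely by position. A round-trip sketch under the same assumptions as above:

```java
import java.util.Locale;
import java.util.Objects;

class FilenameRoundTripSketch {
    static String invertLong(long num) {
        return String.format(Locale.ROOT, "%019d", Long.MAX_VALUE - num);
    }

    public static void main(String[] args) {
        String nodeId = "some-node-id";
        // Translog-style layout from the hunk above:
        // metadata__<term>__<gen>__<createdAt>__<nodeIdHash>__<version>
        String name = String.join("__",
            "metadata", invertLong(1), invertLong(2),
            invertLong(System.currentTimeMillis()),
            String.valueOf(Objects.hash(nodeId)), "1");

        String[] tokens = name.split("__");
        long primaryTerm = Long.MAX_VALUE - Long.parseLong(tokens[1]);
        long generation = Long.MAX_VALUE - Long.parseLong(tokens[2]);
        String nodeIdHash = tokens[4];

        System.out.println(primaryTerm); // 1
        System.out.println(generation);  // 2
        System.out.println(nodeIdHash.equals(String.valueOf(Objects.hash(nodeId)))); // true
    }
}
```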
@@ -151,10 +151,10 @@ public void testVerifyMultipleWriters_Segment() {
     }
 
     public void testVerifyMultipleWriters_Translog() throws InterruptedException {
-        TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node-1");
+        TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1");
         String mdFilename = tm.getFileName();
         Thread.sleep(1);
-        TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node-1");
+        TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--1");
         String mdFilename2 = tm2.getFileName();
         List<BlobMetadata> bmList = new LinkedList<>();
         bmList.add(new PlainBlobMetadata(mdFilename, 1));
@@ -167,7 +167,7 @@ public void testVerifyMultipleWriters_Translog() throws InterruptedException {
 
         bmList = new LinkedList<>();
         bmList.add(new PlainBlobMetadata(mdFilename, 1));
-        TranslogTransferMetadata tm3 = new TranslogTransferMetadata(1, 1, 1, 2, "node-2");
+        TranslogTransferMetadata tm3 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2");
         bmList.add(new PlainBlobMetadata(tm3.getFileName(), 1));
         List<BlobMetadata> finalBmList = bmList;
         assertThrows(
@@ -67,6 +67,7 @@
 import org.mockito.Mockito;
 
 import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH;
+import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR;
 import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes;
 import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata;
 import static org.hamcrest.CoreMatchers.is;
@@ -213,9 +214,7 @@ public void testUploadedSegmentMetadataFromStringException() {
     }
 
     public void testGetPrimaryTermGenerationUuid() {
-        String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split(
-            RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR
-        );
+        String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split(SEPARATOR);
         assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens));
         assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens));
     }
@@ -1178,6 +1177,10 @@ public void testMetadataFileNameOrder() {
         actualList.sort(String::compareTo);
 
         assertEquals(List.of(file3, file2, file4, file6, file5, file1), actualList);
+
+        long count = file1.chars().filter(ch -> ch == SEPARATOR.charAt(0)).count();
+        // The name should contain no `_` beyond the `__` separators themselves, since `_` is reserved as the separator.
+        assertEquals(14, count);
     }
 
     private static class WrapperIndexOutput extends IndexOutput {
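
The new assertion pins the underscore count of a segment metadata filename at 14, i.e. exactly seven `__` separators, which only holds because a hashed node-id token can never contribute a `_` of its own; the translog test below makes the same check with 10. A small demonstration of that property (sample node IDs are arbitrary):

```java
import java.util.Objects;

class SeparatorSafetySketch {
    public static void main(String[] args) {
        for (String nodeId : new String[] { "node--1", "ab__cd-EfGh1", "plain" }) {
            // Decimal rendering of an int hash: digits, optionally a leading '-'.
            String token = String.valueOf(Objects.hash(nodeId));
            long underscores = token.chars().filter(ch -> ch == '_').count();
            System.out.println(token + " -> " + underscores); // always 0
        }
    }
}
```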
@@ -39,11 +39,14 @@
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.mockito.Mockito;
 
+import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyMap;
 import static org.mockito.ArgumentMatchers.anySet;
@@ -503,8 +506,12 @@ private void assertTlogCkpDownloadStats() {
     }
 
     public void testGetPrimaryTermAndGeneration() {
-        String tm = new TranslogTransferMetadata(1, 2, 1, 2, "node-1").getFileName();
-        assertEquals(new Tuple<>(new Tuple<>(1L, 2L), "node-1"), TranslogTransferMetadata.getNodeIdByPrimaryTermAndGeneration(tm));
+        String nodeId = UUID.randomUUID().toString();
+        String tm = new TranslogTransferMetadata(1, 2, 1, 2, nodeId).getFileName();
+        Tuple<Tuple<Long, Long>, String> actualOutput = TranslogTransferMetadata.getNodeIdByPrimaryTermAndGeneration(tm);
+        assertEquals(1L, (long) (actualOutput.v1().v1()));
+        assertEquals(2L, (long) (actualOutput.v1().v2()));
+        assertEquals(String.valueOf(Objects.hash(nodeId)), actualOutput.v2());
     }
 
     public void testMetadataConflict() throws InterruptedException {
@@ -515,10 +522,13 @@
             null,
             remoteTranslogTransferTracker
         );
-        TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node-1");
+        TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1");
         String mdFilename = tm.getFileName();
+        long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count();
+        // The name should contain no `_` beyond the `__` separators themselves, since `_` is reserved as the separator.
+        assertEquals(10, count);
         Thread.sleep(1);
-        TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node-2");
+        TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2");
         String mdFilename2 = tm2.getFileName();
 
         doAnswer(invocation -> {
