Fix Translog.Delete serialization for sequence numbers (elastic#22543)
* Fix Translog.Delete serialization for sequence numbers

Translog.Delete used `.writeVLong` instead of `.writeLong` for the sequence
number and primary term (and `.readVLong` instead of `.readLong` in the corresponding
"read" path). This could lead to a 5.x node sending a translog operation with a
negative sequence number (-2 for an unassigned seq no), which tripped an assertion
on serializing a negative number and caused ES to exit; see the encoding sketch
below the commit message.

Adds a unit test for serialization and a mixed-cluster REST test, since that was
how this was originally caught.

* Use more realistic values for random seqNum and primary term

* Add comment with TODO for removal in 7.0

* Change comment into an assert
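
To make the failure concrete, here is a minimal, self-contained sketch of a vlong-style encoder versus a fixed-width long write. This is illustrative only: the class and method bodies below are assumptions made for the example, not the actual Elasticsearch StreamOutput code. It shows the core problem, namely that a vlong encoder packs 7 bits per byte and presumes a non-negative value, so a negative sequence number such as the unassigned marker (-2) trips a non-negative assertion (or, with assertions disabled, smears across ten bytes), while a fixed-width 8-byte write round-trips negatives cleanly.

```java
import java.io.ByteArrayOutputStream;

// Illustrative sketch only: not the actual Elasticsearch StreamOutput code.
// Shows why a vlong-style encoder cannot carry a negative sequence number
// such as the unassigned marker (-2), while a fixed-width long write can.
public class VLongVsLongSketch {

    // Variable-length encoding: 7 bits per byte, assumes a non-negative value.
    static void writeVLong(ByteArrayOutputStream out, long value) {
        assert value >= 0 : "vlong encoding only supports non-negative values, got " + value;
        while ((value & ~0x7FL) != 0) {
            out.write((int) ((value & 0x7F) | 0x80));
            value >>>= 7;
        }
        out.write((int) value);
    }

    // Fixed-width encoding: always 8 bytes, negative values round-trip fine.
    static void writeLong(ByteArrayOutputStream out, long value) {
        for (int shift = 56; shift >= 0; shift -= 8) {
            out.write((int) (value >>> shift));
        }
    }

    public static void main(String[] args) {
        long unassignedSeqNo = -2; // what a 5.x node sends for an unassigned sequence number

        ByteArrayOutputStream fixed = new ByteArrayOutputStream();
        writeLong(fixed, unassignedSeqNo);
        System.out.println("writeLong bytes: " + fixed.size()); // 8

        ByteArrayOutputStream variable = new ByteArrayOutputStream();
        // Run with -ea: the assert fires here, mirroring the failure described above.
        // With assertions disabled, the negative value is smeared across 10 bytes instead.
        writeVLong(variable, unassignedSeqNo);
        System.out.println("writeVLong bytes: " + variable.size()); // 10
    }
}
```

Switching the Delete fields to writeLong/readLong, as the diff below does, costs a fixed 8 bytes per field on the wire, which lines up with the larger expected translog sizes asserted in TranslogTests further down.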
dakrone authored Jan 11, 2017
1 parent 389ffc9 commit fed2a1a
Showing 3 changed files with 87 additions and 10 deletions.
Translog.java
@@ -855,7 +855,7 @@ public Index(StreamInput in) throws IOException {
in.readLong(); // ttl
}
this.versionType = VersionType.fromValue(in.readByte());
-assert versionType.validateVersionForWrites(this.version);
+assert versionType.validateVersionForWrites(this.version) : "invalid version for writes: " + this.version;
if (format >= FORMAT_AUTO_GENERATED_IDS) {
this.autoGeneratedIdTimestamp = in.readLong();
} else {
@@ -1036,8 +1036,8 @@ public Delete(StreamInput in) throws IOException {
this.versionType = VersionType.fromValue(in.readByte());
assert versionType.validateVersionForWrites(this.version);
if (format >= FORMAT_SEQ_NO) {
-seqNo = in.readVLong();
-primaryTerm = in.readVLong();
+seqNo = in.readLong();
+primaryTerm = in.readLong();
}
}

@@ -1100,8 +1100,8 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeString(uid.text());
out.writeLong(version);
out.writeByte(versionType.getValue());
-out.writeVLong(seqNo);
-out.writeVLong(primaryTerm);
+out.writeLong(seqNo);
+out.writeLong(primaryTerm);
}

@Override
TranslogTests.java
@@ -23,6 +23,9 @@
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.Term;
import org.apache.lucene.mockfile.FilterFileChannel;
import org.apache.lucene.store.AlreadyClosedException;
@@ -31,6 +34,7 @@
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@@ -47,6 +51,12 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.Engine.Operation.Origin;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog.Location;
@@ -67,6 +77,7 @@
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
@@ -297,14 +308,14 @@ public void testStats() throws IOException {
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(2L));
-assertThat(stats.getTranslogSizeInBytes(), equalTo(125L));
+assertThat(stats.getTranslogSizeInBytes(), equalTo(139L));
}

translog.add(new Translog.Delete(newUid("3")));
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(3L));
-assertThat(stats.getTranslogSizeInBytes(), equalTo(153L));
+assertThat(stats.getTranslogSizeInBytes(), equalTo(181L));
}

final long seqNo = 1;
@@ -313,10 +324,10 @@ public void testStats() throws IOException {
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(4L));
-assertThat(stats.getTranslogSizeInBytes(), equalTo(195L));
+assertThat(stats.getTranslogSizeInBytes(), equalTo(223L));
}

-final long expectedSizeInBytes = 238L;
+final long expectedSizeInBytes = 266L;
translog.prepareCommit();
{
final TranslogStats stats = stats();
@@ -1993,4 +2004,47 @@ public void testPendingDelete() throws IOException {
public static Translog.Location randomTranslogLocation() {
return new Translog.Location(randomLong(), randomLong(), randomInt());
}

public void testTranslogOpSerialization() throws Exception {
BytesReference B_1 = new BytesArray(new byte[]{1});
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
seqID.seqNo.setLongValue(randomSeqNum);
seqID.seqNoDocValue.setLongValue(randomSeqNum);
seqID.primaryTerm.setLongValue(randomPrimaryTerm);
Field uidField = new Field("_uid", "1", UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 1);
Document document = new Document();
document.add(new TextField("value", "test", Field.Store.YES));
document.add(uidField);
document.add(versionField);
document.add(seqID.seqNo);
document.add(seqID.seqNoDocValue);
document.add(seqID.primaryTerm);
ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, null);

Engine.Index eIndex = new Engine.Index(newUid("1"), doc, randomSeqNum, randomPrimaryTerm,
1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
Translog.Index index = new Translog.Index(eIndex, eIndexResult);

BytesStreamOutput out = new BytesStreamOutput();
index.writeTo(out);
StreamInput in = out.bytes().streamInput();
Translog.Index serializedIndex = new Translog.Index(in);
assertEquals(index, serializedIndex);

Engine.Delete eDelete = new Engine.Delete("type", "1", newUid("1"), randomSeqNum, randomPrimaryTerm,
2, VersionType.INTERNAL, Origin.PRIMARY, 0);
Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);

out = new BytesStreamOutput();
delete.writeTo(out);
in = out.bytes().streamInput();
Translog.Delete serializedDelete = new Translog.Delete(in);
assertEquals(delete, serializedDelete);
}
}
Mixed-cluster REST test (YAML)
@@ -26,6 +26,29 @@
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"f1": "v5_mixed", "f2": 9}'

- do:
index:
index: test_index
type: test_type
id: d10
body: {"f1": "v6_mixed", "f2": 10}

- do:
indices.flush:
index: test_index

- do:
search:
index: test_index

- match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster

- do:
delete:
index: test_index
type: test_type
id: d10

- do:
indices.flush:
index: test_index
@@ -34,7 +57,7 @@
search:
index: test_index

-- match: { hits.total: 10 } # 5 docs from old cluster, 5 docs from mixed cluster
+- match: { hits.total: 10 }

---
"Verify custom cluster metadata still exists during upgrade":
