Fix PREFETCH_BYTES_USEFUL stat calculation (#12251)
Summary:
After the refactoring of FilePrefetchBuffer, PREFETCH_BYTES_USEFUL was miscalculated: instead of counting how many of the requested bytes were already in the buffer, the stat was fed aligned_useful_len, which is rounded down to the read alignment and therefore also counts alignment padding the caller never requested.

Also renamed chunk_offset_in_buffer to aligned_useful_offset_in_buf, for consistency with aligned_useful_len.
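
To make the miscalculation concrete, here is a small standalone sketch (illustrative only, not part of the commit; it borrows the numbers used by the updated unit test below) contrasting the alignment-rounded length the old code reported with the requested bytes the fixed code reports:

#include <cassert>
#include <cstdint>

// Mirrors RocksDB's Rounddown helper: round x down to a multiple of align.
uint64_t Rounddown(uint64_t x, uint64_t align) { return x - (x % align); }

int main() {
  // The buffer holds file offsets [8192, 24576), alignment is 4096, and the
  // caller requests a read starting at offset 16000.
  const uint64_t buf_offset = 8192;
  const uint64_t buf_size = 16384;
  const uint64_t alignment = 4096;
  const uint64_t offset = 16000;

  // Old stat input: aligned_useful_len starts at the alignment boundary
  // below `offset`, so it also counts 16000 - 12288 = 3712 bytes of padding
  // that were never requested.
  const uint64_t aligned_useful_offset_in_buf =
      Rounddown(offset - buf_offset, alignment);  // 4096
  const uint64_t aligned_useful_len =
      buf_size - aligned_useful_offset_in_buf;    // 12288

  // Fixed stat input: only the requested bytes already in the buffer, i.e.
  // from `offset` to the buffer's end offset.
  const uint64_t bytes_useful = buf_offset + buf_size - offset;  // 8576

  assert(aligned_useful_len == 12288);
  assert(bytes_useful == 8576);
  return 0;
}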

Pull Request resolved: #12251

Test Plan:
1. Validated internally through release validation benchmarks.
2. Updated a unit test so that it fails without the fix.

Reviewed By: ajkr

Differential Revision: D52891112

Pulled By: akankshamahajan15

fbshipit-source-id: 2526a0b0572d473beaf8b841f2f9c2f6275d9779
akankshamahajan15 authored and facebook-github-bot committed Jan 19, 2024
1 parent 4835c11 commit b5bb553
Showing 2 changed files with 30 additions and 19 deletions.
30 changes: 15 additions & 15 deletions file/file_prefetch_buffer.cc
@@ -27,7 +27,7 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
                                              size_t roundup_len,
                                              bool refit_tail,
                                              uint64_t& aligned_useful_len) {
-  uint64_t chunk_offset_in_buffer = 0;
+  uint64_t aligned_useful_offset_in_buf = 0;
   bool copy_data_to_new_buffer = false;
   // Check if requested bytes are in the existing buffer_.
   // If only a few bytes exist -- reuse them & read only what is really needed.
@@ -37,19 +37,19 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
     // Only a few requested bytes are in the buffer. memmove those chunk of
     // bytes to the beginning, and memcpy them back into the new buffer if a
     // new buffer is created.
-    chunk_offset_in_buffer =
+    aligned_useful_offset_in_buf =
         Rounddown(static_cast<size_t>(offset - buf->offset_), alignment);
-    aligned_useful_len =
-        static_cast<uint64_t>(buf->CurrentSize()) - chunk_offset_in_buffer;
-    assert(chunk_offset_in_buffer % alignment == 0);
+    aligned_useful_len = static_cast<uint64_t>(buf->CurrentSize()) -
+                         aligned_useful_offset_in_buf;
+    assert(aligned_useful_offset_in_buf % alignment == 0);
     assert(aligned_useful_len % alignment == 0);
-    assert(chunk_offset_in_buffer + aligned_useful_len <=
+    assert(aligned_useful_offset_in_buf + aligned_useful_len <=
            buf->offset_ + buf->CurrentSize());
     if (aligned_useful_len > 0) {
       copy_data_to_new_buffer = true;
     } else {
       // this reset is not necessary, but just to be safe.
-      chunk_offset_in_buffer = 0;
+      aligned_useful_offset_in_buf = 0;
     }
   }
 
@@ -60,11 +60,11 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
     buf->buffer_.Alignment(alignment);
     buf->buffer_.AllocateNewBuffer(
         static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
-        chunk_offset_in_buffer, static_cast<size_t>(aligned_useful_len));
+        aligned_useful_offset_in_buf, static_cast<size_t>(aligned_useful_len));
   } else if (aligned_useful_len > 0 && refit_tail) {
     // New buffer not needed. But memmove bytes from tail to the beginning since
     // aligned_useful_len is greater than 0.
-    buf->buffer_.RefitTail(static_cast<size_t>(chunk_offset_in_buffer),
+    buf->buffer_.RefitTail(static_cast<size_t>(aligned_useful_offset_in_buf),
                            static_cast<size_t>(aligned_useful_len));
   } else if (aligned_useful_len > 0) {
     // For async prefetching, it doesn't call RefitTail with aligned_useful_len
@@ -75,7 +75,7 @@ void FilePrefetchBuffer::PrepareBufferForRead(BufferInfo* buf, size_t alignment,
     buf->buffer_.Alignment(alignment);
     buf->buffer_.AllocateNewBuffer(
         static_cast<size_t>(roundup_len), copy_data_to_new_buffer,
-        chunk_offset_in_buffer, static_cast<size_t>(aligned_useful_len));
+        aligned_useful_offset_in_buf, static_cast<size_t>(aligned_useful_len));
   }
 }
 
@@ -470,11 +470,8 @@ Status FilePrefetchBuffer::HandleOverlappingData(
     overlap_buf_->offset_ = offset;
     copy_to_overlap_buffer = true;
 
-    size_t initial_buf_size = overlap_buf_->CurrentSize();
     CopyDataToBuffer(buf, tmp_offset, tmp_length);
-    UpdateStats(
-        /*found_in_buffer=*/false,
-        overlap_buf_->CurrentSize() - initial_buf_size);
+    UpdateStats(/*found_in_buffer=*/false, overlap_buf_->CurrentSize());
 
     // Call async prefetching on freed buffer since data has been consumed
     // only if requested data lies within next buffer.
@@ -635,11 +632,14 @@ Status FilePrefetchBuffer::PrefetchInternal(const IOOptions& opts,
 
   // For length == 0, skip the synchronous prefetching. read_len1 will be 0.
   if (length > 0) {
+    if (buf->IsOffsetInBuffer(offset)) {
+      UpdateStats(/*found_in_buffer=*/false,
+                  (buf->offset_ + buf->CurrentSize() - offset));
+    }
     ReadAheadSizeTuning(buf, /*read_curr_block=*/true, /*refit_tail*/
                         true, start_offset1, alignment, length, readahead_size,
                         start_offset1, end_offset1, read_len1,
                         aligned_useful_len1);
-    UpdateStats(/*found_in_buffer=*/false, aligned_useful_len1);
   } else {
     UpdateStats(/*found_in_buffer=*/true, original_length);
   }
19 changes: 15 additions & 4 deletions file/prefetch_test.cc
@@ -3108,8 +3108,8 @@ TEST_F(FilePrefetchBufferTest, SyncReadaheadStats) {
 
   // Simulate a block cache hit
   fpb.UpdateReadPattern(4096, 4096, false);
-  // Now read some data that'll prefetch additional data from 12288 to 16384
-  // (4096) + 8192 (readahead_size).
+  // Now read some data that'll prefetch additional data from 12288 to 24576.
+  // (8192) + 8192 (readahead_size).
   ASSERT_TRUE(
       fpb.TryReadFromCache(IOOptions(), r.get(), 8192, 8192, &result, &s));
   ASSERT_EQ(s, Status::OK());
@@ -3119,8 +3119,19 @@ TEST_F(FilePrefetchBufferTest, SyncReadaheadStats) {
   ASSERT_TRUE(
       fpb.TryReadFromCache(IOOptions(), r.get(), 12288, 4096, &result, &s));
   ASSERT_EQ(s, Status::OK());
-  ASSERT_EQ(stats->getTickerCount(PREFETCH_HITS), 1);
-  ASSERT_EQ(stats->getTickerCount(PREFETCH_BYTES_USEFUL), 8192);
+  ASSERT_EQ(stats->getAndResetTickerCount(PREFETCH_HITS), 1);
+  ASSERT_EQ(stats->getAndResetTickerCount(PREFETCH_BYTES_USEFUL), 8192);
+
+  // Now read some data whose length doesn't align with the alignment and
+  // that needs prefetching. Read from 16000 with length 10000 (i.e. the
+  // requested end offset is 26000).
+  ASSERT_TRUE(
+      fpb.TryReadFromCache(IOOptions(), r.get(), 16000, 10000, &result, &s));
+  ASSERT_EQ(s, Status::OK());
+  ASSERT_EQ(stats->getAndResetTickerCount(PREFETCH_HITS), 0);
+  ASSERT_EQ(
+      stats->getAndResetTickerCount(PREFETCH_BYTES_USEFUL),
+      /* 24576 (end offset of the buffer) - 16000 (requested offset) = */ 8576);
 }
 
 }  // namespace ROCKSDB_NAMESPACE
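
Side note on observing these tickers from application code — a minimal sketch (assuming a RocksDB build whose include/rocksdb/statistics.h exposes the PREFETCH_* tickers; the DB path is arbitrary):

#include <cassert>
#include <iostream>
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/prefetch_stats_demo", &db);
  assert(s.ok());

  // ... perform reads that trigger prefetching, e.g. a large range scan ...

  // After this fix, PREFETCH_BYTES_USEFUL counts only requested bytes that
  // were already present in the prefetch buffer, excluding alignment padding.
  std::cout << "PREFETCH_BYTES_USEFUL: "
            << options.statistics->getTickerCount(
                   rocksdb::PREFETCH_BYTES_USEFUL)
            << "\nPREFETCH_HITS: "
            << options.statistics->getTickerCount(rocksdb::PREFETCH_HITS)
            << std::endl;

  delete db;
  return 0;
}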
