Skip to content

Commit

Permalink
accounts-db: Benchmark cache evictions
Browse files Browse the repository at this point in the history
The existing `concurrent_{read,scan}_write` benchmarks are not
sufficient for benchmarking eviction or for evaluating which
eviction policy performs best, because they never fill up the cache,
so eviction never happens.

Add a new benchmark that pre-fills the cache and then measures
concurrent reads and writes on the full cache, so that evictions are
triggered throughout the measurement.
  • Loading branch information
vadorovsky committed Dec 18, 2024
1 parent af0a349 commit f3b9d49
Show file tree
Hide file tree
Showing 11 changed files with 479 additions and 48 deletions.
52 changes: 51 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -375,6 +375,7 @@ merlin = "3"
min-max-heap = "1.3.0"
mockall = "0.11.4"
modular-bitfield = "0.11.2"
ndarray = "0.16.1"
nix = "0.29.0"
num-bigint = "0.4.6"
num-derive = "0.4"
Expand Down
5 changes: 5 additions & 0 deletions accounts-db/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ assert_matches = { workspace = true }
criterion = { workspace = true }
libsecp256k1 = { workspace = true }
memoffset = { workspace = true }
ndarray = { workspace = true }
rand_chacha = { workspace = true }
serde_bytes = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
Expand Down Expand Up @@ -103,6 +104,10 @@ harness = false
name = "bench_hashing"
harness = false

[[bench]]
name = "read_only_accounts_cache"
harness = false

[[bench]]
name = "bench_serde"
harness = false
Expand Down
53 changes: 7 additions & 46 deletions accounts-db/benches/bench_accounts_file.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
#![allow(clippy::arithmetic_side_effects)]
use {
criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput},
rand::{distributions::WeightedIndex, prelude::*},
rand_chacha::ChaChaRng,
solana_accounts_db::{
accounts_file::StorageAccess,
append_vec::{self, AppendVec, SCAN_BUFFER_SIZE_WITHOUT_DATA},
append_vec::{self, AppendVec},
tiered_storage::{
file::TieredReadableFile,
hot::{HotStorageReader, HotStorageWriter},
Expand All @@ -15,13 +13,13 @@ use {
account::{AccountSharedData, ReadableAccount},
clock::Slot,
pubkey::Pubkey,
rent::Rent,
rent_collector::RENT_EXEMPT_RENT_EPOCH,
system_instruction::MAX_PERMITTED_DATA_LENGTH,
},
std::{iter, mem::ManuallyDrop},
std::mem::ManuallyDrop,
};

mod utils;

const ACCOUNTS_COUNTS: [usize; 4] = [
1, // the smallest count; will bench overhead
100, // number of accounts written per slot on mnb (with *no* rent rewrites)
Expand Down Expand Up @@ -102,54 +100,17 @@ fn bench_scan_pubkeys(c: &mut Criterion) {
let mut group = c.benchmark_group("scan_pubkeys");
let temp_dir = tempfile::tempdir().unwrap();

// distribution of account data sizes to use when creating accounts
// 3% of accounts have no data
// 75% of accounts are 165 bytes (a token account)
// 20% of accounts are 200 bytes (a stake account)
// 1% of accounts are 256 kibibytes (pathological case for the scan buffer)
// 1% of accounts are 10 mebibytes (the max size for an account)
let data_sizes = [
0,
165,
200,
SCAN_BUFFER_SIZE_WITHOUT_DATA,
MAX_PERMITTED_DATA_LENGTH as usize,
];
let weights = [3, 75, 20, 1, 1];
let distribution = WeightedIndex::new(weights).unwrap();

let rent = Rent::default();
let rent_minimum_balances: Vec<_> = data_sizes
.iter()
.map(|data_size| rent.minimum_balance(*data_size))
.collect();

for accounts_count in ACCOUNTS_COUNTS {
group.throughput(Throughput::Elements(accounts_count as u64));
let mut rng = ChaChaRng::seed_from_u64(accounts_count as u64);

let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique)
.take(accounts_count)
.collect();
let accounts: Vec<_> = iter::repeat_with(|| {
let index = distribution.sample(&mut rng);
AccountSharedData::new_rent_epoch(
rent_minimum_balances[index],
data_sizes[index],
&Pubkey::default(),
RENT_EXEMPT_RENT_EPOCH,
)
})
.take(pubkeys.len())
.collect();
let storable_accounts: Vec<_> = iter::zip(&pubkeys, &accounts).collect();
let storable_accounts: Vec<_> = utils::accounts(255).take(accounts_count).collect();

// create an append vec file
let append_vec_path = temp_dir.path().join(format!("append_vec_{accounts_count}"));
_ = std::fs::remove_file(&append_vec_path);
let file_size = accounts
let file_size = storable_accounts
.iter()
.map(|account| append_vec::aligned_stored_size(account.data().len()))
.map(|(_, account)| append_vec::aligned_stored_size(account.data().len()))
.sum();
let append_vec = AppendVec::new(append_vec_path, true, file_size);
let stored_accounts_info = append_vec
Expand Down
Loading

0 comments on commit f3b9d49

Please sign in to comment.