Skip to content

Commit

Permalink
accounts-db: Benchmark cache evictions
Browse files Browse the repository at this point in the history
The already existing `concurrent_{read,scan}_write` benchmarks are not
sufficient for benchmarking the eviction and evaluating what kind of
eviction policy performs the best, because they don't fill up the cache,
so eviction never happens.

Add a new benchmark that first fills the cache and only then measures
concurrent reads and writes, so that every write triggers an eviction.
  • Loading branch information
vadorovsky committed Dec 13, 2024
1 parent 0c26485 commit ba708a3
Show file tree
Hide file tree
Showing 7 changed files with 340 additions and 1 deletion.
52 changes: 51 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -367,6 +367,7 @@ merlin = "3"
min-max-heap = "1.3.0"
mockall = "0.11.4"
modular-bitfield = "0.11.2"
ndarray = "0.16.1"
nix = "0.29.0"
num-bigint = "0.4.6"
num-derive = "0.4"
Expand Down
5 changes: 5 additions & 0 deletions accounts-db/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ assert_matches = { workspace = true }
criterion = { workspace = true }
libsecp256k1 = { workspace = true }
memoffset = { workspace = true }
ndarray = { workspace = true }
rand_chacha = { workspace = true }
serde_bytes = { workspace = true }
# See order-crates-for-publishing.py for using this unusual `path = "."`
Expand Down Expand Up @@ -103,6 +104,10 @@ harness = false
name = "bench_hashing"
harness = false

[[bench]]
name = "read_only_accounts_cache"
harness = false

[[bench]]
name = "bench_serde"
harness = false
Expand Down
270 changes: 270 additions & 0 deletions accounts-db/benches/read_only_accounts_cache.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,270 @@
#![feature(test)]

extern crate test;

use {
criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion},
rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng},
solana_accounts_db::{
accounts_db::AccountsDb,
read_only_accounts_cache::{ReadOnlyAccountsCache, CACHE_ENTRY_SIZE},
},
solana_sdk::{
account::{Account, ReadableAccount},
pubkey,
},
std::{
iter,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
thread::Builder,
},
};

/// Sizes (in bytes) of the account data payloads to bench; 0 measures the
/// fixed per-entry cache overhead alone.
const ACCOUNTS_SIZES: &[usize] = &[0, 512, 1024];
/// Numbers of reader and writer threads to bench.
const NUM_READERS_WRITERS: &[usize] = &[
8,
16,
// These parameters are likely to freeze your computer, if it has less than
// 32 cores.
// 32, 64, 128, 256, 512, 1024,
];

/// Benchmarks stores and loads on an initially empty read-only accounts
/// cache, without filling it up, so no eviction occurs.
fn bench_read_only_accounts_cache(c: &mut Criterion) {
    let mut group = c.benchmark_group("cache");
    let slot = 0;

    for account_size in ACCOUNTS_SIZES {
        // Number of accounts to use in the benchmark: as many as fit in the
        // cache at its high-water-mark data size.
        let num_accounts_benched = AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI
            .div_ceil(CACHE_ENTRY_SIZE.saturating_add(*account_size));
        group.sample_size(num_accounts_benched);

        for num_readers_writers in NUM_READERS_WRITERS {
            // NOTE(review): unlike the eviction benchmark, this function
            // spawns no background threads; `num_readers_writers` only shows
            // up in the benchmark id. Confirm whether background contention
            // was intended here as well.
            let cache = Arc::new(ReadOnlyAccountsCache::new(
                AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
                AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
                AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
            ));

            // Benchmark storing accounts into an initially empty cache.
            let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand)
                .take(num_accounts_benched)
                .collect();
            group.bench_function(
                BenchmarkId::new(
                    "read_only_accounts_cache_store",
                    format!("{account_size}_bytes_{num_readers_writers}_threads"),
                ),
                |b| {
                    b.iter_batched(
                        || {
                            // Build the accounts in the setup closure so the
                            // allocation is excluded from the measured time.
                            let accounts_data = iter::repeat(
                                Account {
                                    lamports: 1,
                                    data: vec![1; *account_size],
                                    ..Default::default()
                                }
                                .to_account_shared_data(),
                            );
                            pubkeys.clone().into_iter().zip(accounts_data)
                        },
                        |accounts_iter| {
                            for (pubkey, account) in accounts_iter {
                                cache.store(pubkey, slot, account);
                            }
                        },
                        BatchSize::PerIteration,
                    )
                },
            );
            // Benchmark loading the accounts stored above.
            group.bench_function(
                BenchmarkId::new(
                    "read_only_accounts_cache_load",
                    format!("{account_size}_bytes_{num_readers_writers}_threads"),
                ),
                |b| {
                    b.iter_batched(
                        || pubkeys.clone().into_iter(),
                        |pubkeys_iter| {
                            for pubkey in pubkeys_iter {
                                // `std::hint::black_box` (stable) replaces the
                                // nightly-only `test::black_box`; it keeps the
                                // load result from being optimized away.
                                std::hint::black_box(cache.load(pubkey, slot));
                            }
                        },
                        BatchSize::SmallInput,
                    )
                },
            );
        }
    }
}

/// Benchmarks the read-only cache eviction mechanism. It does so by performing
/// multithreaded reads and writes on a full cache. Each write triggers
/// eviction. Background reads add more contention.
fn bench_read_only_accounts_cache_eviction(c: &mut Criterion) {
    /// Number of accounts to use in the benchmark. That's the maximum number
    /// of evictions observed on mainnet validators.
    const NUM_ACCOUNTS_BENCHED: usize = 1000;

    let mut group = c.benchmark_group("cache_eviction");
    group.sample_size(NUM_ACCOUNTS_BENCHED);

    for account_size in ACCOUNTS_SIZES {
        // Number of accounts needed to initially fill the cache.
        let num_accounts_init = AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI
            .div_ceil(CACHE_ENTRY_SIZE.saturating_add(*account_size));

        for num_readers_writers in NUM_READERS_WRITERS {
            let cache = Arc::new(ReadOnlyAccountsCache::new(
                AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
                AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
                AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
            ));

            // Prepare accounts for the cache fillup.
            let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand)
                .take(num_accounts_init)
                .collect();
            let accounts_data = iter::repeat(
                Account {
                    lamports: 1,
                    data: vec![1; *account_size],
                    ..Default::default()
                }
                .into(),
            )
            .take(num_accounts_init);
            let accounts = pubkeys.iter().zip(accounts_data);

            // Fill up the cache, so that every subsequent store evicts.
            let slot = 0;
            for (pubkey, account) in accounts {
                cache.store(*pubkey, slot, account);
            }

            // Spawn reader and writer threads in the background; they keep
            // reading and overwriting the accounts stored above to add
            // contention while the benchmark runs.
            //
            // BUG FIX: `map` is lazy — without the `collect()` calls below no
            // thread was spawned until the join loops consumed the iterators
            // at the end, i.e. *after* `stop_threads` was already set, so the
            // benchmark previously ran with zero background contention.
            let stop_threads = Arc::new(AtomicBool::new(false));
            let reader_handles: Vec<_> = (0..*num_readers_writers)
                .map(|i| {
                    let stop_threads = Arc::clone(&stop_threads);
                    let cache = Arc::clone(&cache);
                    let pubkeys = pubkeys.clone();

                    Builder::new()
                        .name(format!("reader{i:02}"))
                        .spawn(move || {
                            // Continuously read random accounts.
                            let mut rng = SmallRng::seed_from_u64(i as u64);
                            while !stop_threads.load(Ordering::Relaxed) {
                                let pubkey = pubkeys.choose(&mut rng).unwrap();
                                // Stable replacement for the nightly-only
                                // `test::black_box`.
                                std::hint::black_box(cache.load(*pubkey, slot));
                            }
                        })
                        .unwrap()
                })
                .collect();
            // Writers store under a newer slot than the fill-up above.
            let slot = 1;
            let writer_handles: Vec<_> = (0..*num_readers_writers)
                .map(|i| {
                    let stop_threads = Arc::clone(&stop_threads);
                    let cache = Arc::clone(&cache);
                    let pubkeys = pubkeys.clone();

                    Builder::new()
                        .name(format!("writer{i:02}"))
                        .spawn(move || {
                            // Continuously write to already existing pubkeys.
                            // Offset the seed so writers don't mirror readers.
                            let mut rng =
                                SmallRng::seed_from_u64(100_u64.saturating_add(i as u64));
                            while !stop_threads.load(Ordering::Relaxed) {
                                let pubkey = pubkeys.choose(&mut rng).unwrap();
                                cache.store(
                                    *pubkey,
                                    slot,
                                    Account {
                                        lamports: 1,
                                        data: vec![1; *account_size],
                                        ..Default::default()
                                    }
                                    .to_account_shared_data(),
                                );
                            }
                        })
                        .unwrap()
                })
                .collect();

            // Benchmark the performance of loading and storing accounts in a
            // cache that is fully populated. This triggers eviction for each
            // write operation. Background threads introduce contention.
            let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand)
                .take(NUM_ACCOUNTS_BENCHED)
                .collect();
            group.bench_function(
                BenchmarkId::new(
                    "read_only_accounts_cache_eviction_store",
                    format!("{account_size}_bytes_{num_readers_writers}_threads"),
                ),
                |b| {
                    b.iter_batched(
                        || {
                            // Build the accounts in the setup closure so the
                            // allocation is excluded from the measured time.
                            let accounts_data = iter::repeat(
                                Account {
                                    lamports: 1,
                                    data: vec![1; *account_size],
                                    ..Default::default()
                                }
                                .to_account_shared_data(),
                            );
                            pubkeys.clone().into_iter().zip(accounts_data)
                        },
                        |accounts_iter| {
                            for (pubkey, account) in accounts_iter {
                                cache.store(pubkey, slot, account);
                            }
                        },
                        BatchSize::PerIteration,
                    )
                },
            );
            group.bench_function(
                BenchmarkId::new(
                    "read_only_accounts_cache_eviction_load",
                    format!("{account_size}_bytes_{num_readers_writers}_threads"),
                ),
                |b| {
                    b.iter_batched(
                        || pubkeys.clone().into_iter(),
                        |pubkeys_iter| {
                            for pubkey in pubkeys_iter {
                                std::hint::black_box(cache.load(pubkey, slot));
                            }
                        },
                        BatchSize::SmallInput,
                    )
                },
            );

            // Stop and join the background threads before the next iteration.
            stop_threads.store(true, Ordering::Relaxed);
            for reader_handle in reader_handles {
                reader_handle.join().unwrap();
            }
            for writer_handle in writer_handles {
                writer_handle.join().unwrap();
            }
        }
    }
}

// Register both benchmark functions with criterion and generate the `main`
// entry point for the benchmark harness (`harness = false` in Cargo.toml).
criterion_group!(
benches,
bench_read_only_accounts_cache,
bench_read_only_accounts_cache_eviction
);
criterion_main!(benches);
Loading

0 comments on commit ba708a3

Please sign in to comment.