forked from solana-labs/solana
-
Notifications
You must be signed in to change notification settings - Fork 256
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
accounts-db: Benchmark cache evictions
The already existing `concurrent_{read,scan}_write` benchmarks are not sufficient for benchmarking the eviction and evaluating what kind of eviction policy performs the best, because they don't fill up the cache, so eviction never happens. Add a new benchmark, which starts measuring the concurrent reads and writes on a full cache.
- Loading branch information
1 parent
0c26485
commit ba708a3
Showing
7 changed files
with
340 additions
and
1 deletion.
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,270 @@ | ||
#![feature(test)] | ||
|
||
extern crate test; | ||
|
||
use { | ||
criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}, | ||
rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng}, | ||
solana_accounts_db::{ | ||
accounts_db::AccountsDb, | ||
read_only_accounts_cache::{ReadOnlyAccountsCache, CACHE_ENTRY_SIZE}, | ||
}, | ||
solana_sdk::{ | ||
account::{Account, ReadableAccount}, | ||
pubkey, | ||
}, | ||
std::{ | ||
iter, | ||
sync::{ | ||
atomic::{AtomicBool, Ordering}, | ||
Arc, | ||
}, | ||
thread::Builder, | ||
}, | ||
}; | ||
|
||
/// Account data sizes (in bytes) exercised by every benchmark.
const ACCOUNTS_SIZES: &[usize] = &[0, 512, 1024];
/// How many reader threads and writer threads each benchmark spawns.
const NUM_READERS_WRITERS: &[usize] = &[
    8,
    16,
    // These parameters are likely to freeze your computer, if it has less than
    // 32 cores.
    // 32, 64, 128, 256, 512, 1024,
];
|
||
fn bench_read_only_accounts_cache(c: &mut Criterion) { | ||
let mut group = c.benchmark_group("cache"); | ||
let slot = 0; | ||
|
||
for account_size in ACCOUNTS_SIZES { | ||
// Number of accounts to use in the benchmark. | ||
let num_accounts_benched = AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI | ||
.div_ceil(CACHE_ENTRY_SIZE.saturating_add(*account_size)); | ||
group.sample_size(num_accounts_benched); | ||
|
||
for num_readers_writers in NUM_READERS_WRITERS { | ||
let cache = Arc::new(ReadOnlyAccountsCache::new( | ||
AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO, | ||
AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, | ||
AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, | ||
)); | ||
|
||
// Benchmark the performance of loading and storing accounts in an | ||
// initially empty cache. | ||
let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand) | ||
.take(num_accounts_benched) | ||
.collect(); | ||
group.bench_function( | ||
BenchmarkId::new( | ||
"read_only_accounts_cache_store", | ||
format!("{account_size}_bytes_{num_readers_writers}_threads"), | ||
), | ||
|b| { | ||
b.iter_batched( | ||
|| { | ||
let accounts_data = iter::repeat( | ||
Account { | ||
lamports: 1, | ||
data: vec![1; *account_size], | ||
..Default::default() | ||
} | ||
.to_account_shared_data(), | ||
); | ||
pubkeys.clone().into_iter().zip(accounts_data) | ||
}, | ||
|accounts_iter| { | ||
for (pubkey, account) in accounts_iter { | ||
cache.store(pubkey, slot, account); | ||
} | ||
}, | ||
BatchSize::PerIteration, | ||
) | ||
}, | ||
); | ||
group.bench_function( | ||
BenchmarkId::new( | ||
"read_only_accounts_cache_load", | ||
format!("{account_size}_bytes_{num_readers_writers}_threads"), | ||
), | ||
|b| { | ||
b.iter_batched( | ||
|| pubkeys.clone().into_iter(), | ||
|pubkeys_iter| { | ||
for pubkey in pubkeys_iter { | ||
test::black_box(cache.load(pubkey, slot)); | ||
} | ||
}, | ||
BatchSize::SmallInput, | ||
) | ||
}, | ||
); | ||
} | ||
} | ||
} | ||
|
||
/// Benchmarks the read-only cache eviction mechanism. It does so by performing | ||
/// multithreaded reads and writes on a full cache. Each write triggers | ||
/// eviction. Background reads add more contention. | ||
fn bench_read_only_accounts_cache_eviction(c: &mut Criterion) { | ||
/// Number of accounts to use in the benchmark. That's the maximum number | ||
/// of evictions observed on mainnet validators. | ||
const NUM_ACCOUNTS_BENCHED: usize = 1000; | ||
|
||
let mut group = c.benchmark_group("cache_eviction"); | ||
group.sample_size(NUM_ACCOUNTS_BENCHED); | ||
|
||
for account_size in ACCOUNTS_SIZES { | ||
// Number of accounts needed to initially fill the cache. | ||
let num_accounts_init = AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI | ||
.div_ceil(CACHE_ENTRY_SIZE.saturating_add(*account_size)); | ||
|
||
for num_readers_writers in NUM_READERS_WRITERS { | ||
let cache = Arc::new(ReadOnlyAccountsCache::new( | ||
AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO, | ||
AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, | ||
AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, | ||
)); | ||
|
||
// Prepare accounts for the cache fillup. | ||
let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand) | ||
.take(num_accounts_init) | ||
.collect(); | ||
let accounts_data = iter::repeat( | ||
Account { | ||
lamports: 1, | ||
data: vec![1; *account_size], | ||
..Default::default() | ||
} | ||
.into(), | ||
) | ||
.take(num_accounts_init); | ||
let accounts = pubkeys.iter().zip(accounts_data); | ||
|
||
// Fill up the cache. | ||
let slot = 0; | ||
for (pubkey, account) in accounts { | ||
cache.store(*pubkey, slot, account); | ||
} | ||
|
||
// Spawn the reader and writer threads in the background. They are | ||
// going to read and write | ||
let stop_threads = Arc::new(AtomicBool::new(false)); | ||
let reader_handles = (0..*num_readers_writers).map(|i| { | ||
let stop_threads = Arc::clone(&stop_threads); | ||
let cache = Arc::clone(&cache); | ||
let pubkeys = pubkeys.clone(); | ||
|
||
Builder::new() | ||
.name(format!("reader{i:02}")) | ||
.spawn({ | ||
move || { | ||
// Continuously read random accounts. | ||
let mut rng = SmallRng::seed_from_u64(i as u64); | ||
while !stop_threads.load(Ordering::Relaxed) { | ||
let pubkey = pubkeys.choose(&mut rng).unwrap(); | ||
test::black_box(cache.load(*pubkey, slot)); | ||
} | ||
} | ||
}) | ||
.unwrap() | ||
}); | ||
let slot = 1; | ||
let writer_handles = (0..*num_readers_writers).map(|i| { | ||
let stop_threads = Arc::clone(&stop_threads); | ||
let cache = Arc::clone(&cache); | ||
let pubkeys = pubkeys.clone(); | ||
|
||
Builder::new() | ||
.name(format!("writer{i:02}")) | ||
.spawn({ | ||
move || { | ||
// Continuously write to already existing pubkeys. | ||
let mut rng = SmallRng::seed_from_u64(100_u64.saturating_add(i as u64)); | ||
while !stop_threads.load(Ordering::Relaxed) { | ||
let pubkey = pubkeys.choose(&mut rng).unwrap(); | ||
cache.store( | ||
*pubkey, | ||
slot, | ||
Account { | ||
lamports: 1, | ||
data: vec![1; *account_size], | ||
..Default::default() | ||
} | ||
.to_account_shared_data(), | ||
); | ||
} | ||
} | ||
}) | ||
.unwrap() | ||
}); | ||
|
||
// Benchmark the performance of loading and storing accounts in a | ||
// cache that is fully populated. This triggers eviction for each | ||
// write operation. Background threads introduce contention. | ||
let pubkeys: Vec<_> = iter::repeat_with(pubkey::new_rand) | ||
.take(NUM_ACCOUNTS_BENCHED) | ||
.collect(); | ||
group.bench_function( | ||
BenchmarkId::new( | ||
"read_only_accounts_cache_eviction_store", | ||
format!("{account_size}_bytes_{num_readers_writers}_threads"), | ||
), | ||
|b| { | ||
b.iter_batched( | ||
|| { | ||
let accounts_data = iter::repeat( | ||
Account { | ||
lamports: 1, | ||
data: vec![1; *account_size], | ||
..Default::default() | ||
} | ||
.to_account_shared_data(), | ||
); | ||
pubkeys.clone().into_iter().zip(accounts_data) | ||
}, | ||
|accounts_iter| { | ||
for (pubkey, account) in accounts_iter { | ||
cache.store(pubkey, slot, account); | ||
} | ||
}, | ||
BatchSize::PerIteration, | ||
) | ||
}, | ||
); | ||
group.bench_function( | ||
BenchmarkId::new( | ||
"read_only_accounts_cache_eviction_load", | ||
format!("{account_size}_bytes_{num_readers_writers}_threads"), | ||
), | ||
|b| { | ||
b.iter_batched( | ||
|| pubkeys.clone().into_iter(), | ||
|pubkeys_iter| { | ||
for pubkey in pubkeys_iter { | ||
test::black_box(cache.load(pubkey, slot)); | ||
} | ||
}, | ||
BatchSize::SmallInput, | ||
) | ||
}, | ||
); | ||
|
||
stop_threads.store(true, Ordering::Relaxed); | ||
for reader_handle in reader_handles { | ||
reader_handle.join().unwrap(); | ||
} | ||
for writer_handle in writer_handles { | ||
writer_handle.join().unwrap(); | ||
} | ||
} | ||
} | ||
} | ||
|
||
criterion_group!( | ||
benches, | ||
bench_read_only_accounts_cache, | ||
bench_read_only_accounts_cache_eviction | ||
); | ||
criterion_main!(benches); |
Oops, something went wrong.