Skip to content

Commit

Permalink
accounts-db: Benchmark cache evictions (#4045)
Browse files Browse the repository at this point in the history
The already existing `concurrent_{read,scan}_write` benchmarks are not
sufficient for benchmarking the eviction and evaluating what kind of
eviction policy performs the best, because they don't fill up the cache,
so eviction never happens. The number of readers in that benchmark is
low (5) and there are no writer threads causing more contention. The
cache is RW-locked, so bencharking only concurrent reads doesn't push
it to the limits.

Add new benchmarks which are made with measuring contention in mind:

- `read_only_accounts_cache` - benchmarks read-only cache loads and
  stores without causing eviction.
- `read_only_accounts_cache_lo_hi` - benchmarks read-only cache eviction
  with low and high thresholds. After each eviction, enough stores need
  to be made to reach the difference between the low and high threshold,
  triggering another eviction. Aims to simulate contention in a manner
  close to what occurs on validators.
- `read_only_accounts_cache_hi` - benchmarks read-only cache eviction
  without differentiating between low and high thresholds. Each store
  triggers another eviction immediately. Measures the absolute
  worst-case scenario, which may not reflect actual conditions in
  validators.
  • Loading branch information
vadorovsky authored Dec 21, 2024
1 parent eef7e26 commit a2d88af
Show file tree
Hide file tree
Showing 8 changed files with 457 additions and 28 deletions.
4 changes: 4 additions & 0 deletions accounts-db/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,10 @@ harness = false
name = "bench_hashing"
harness = false

[[bench]]
name = "read_only_accounts_cache"
harness = false

[[bench]]
name = "bench_serde"
harness = false
Expand Down
33 changes: 6 additions & 27 deletions accounts-db/benches/bench_accounts_file.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
#![allow(clippy::arithmetic_side_effects)]
use {
criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput},
rand::{distributions::WeightedIndex, prelude::*},
rand_chacha::ChaChaRng,
solana_accounts_db::{
accounts_file::StorageAccess,
append_vec::{self, AppendVec, SCAN_BUFFER_SIZE_WITHOUT_DATA},
Expand All @@ -15,13 +13,14 @@ use {
account::{AccountSharedData, ReadableAccount},
clock::Slot,
pubkey::Pubkey,
rent::Rent,
rent_collector::RENT_EXEMPT_RENT_EPOCH,
system_instruction::MAX_PERMITTED_DATA_LENGTH,
},
std::{iter, mem::ManuallyDrop},
std::mem::ManuallyDrop,
};

mod utils;

const ACCOUNTS_COUNTS: [usize; 4] = [
1, // the smallest count; will bench overhead
100, // number of accounts written per slot on mnb (with *no* rent rewrites)
Expand Down Expand Up @@ -116,40 +115,20 @@ fn bench_scan_pubkeys(c: &mut Criterion) {
MAX_PERMITTED_DATA_LENGTH as usize,
];
let weights = [3, 75, 20, 1, 1];
let distribution = WeightedIndex::new(weights).unwrap();

let rent = Rent::default();
let rent_minimum_balances: Vec<_> = data_sizes
.iter()
.map(|data_size| rent.minimum_balance(*data_size))
.collect();

for accounts_count in ACCOUNTS_COUNTS {
group.throughput(Throughput::Elements(accounts_count as u64));
let mut rng = ChaChaRng::seed_from_u64(accounts_count as u64);

let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique)
let storable_accounts: Vec<_> = utils::accounts(255, &data_sizes, &weights)
.take(accounts_count)
.collect();
let accounts: Vec<_> = iter::repeat_with(|| {
let index = distribution.sample(&mut rng);
AccountSharedData::new_rent_epoch(
rent_minimum_balances[index],
data_sizes[index],
&Pubkey::default(),
RENT_EXEMPT_RENT_EPOCH,
)
})
.take(pubkeys.len())
.collect();
let storable_accounts: Vec<_> = iter::zip(&pubkeys, &accounts).collect();

// create an append vec file
let append_vec_path = temp_dir.path().join(format!("append_vec_{accounts_count}"));
_ = std::fs::remove_file(&append_vec_path);
let file_size = accounts
let file_size = storable_accounts
.iter()
.map(|account| append_vec::aligned_stored_size(account.data().len()))
.map(|(_, account)| append_vec::aligned_stored_size(account.data().len()))
.sum();
let append_vec = AppendVec::new(append_vec_path, true, file_size);
let stored_accounts_info = append_vec
Expand Down
Loading

0 comments on commit a2d88af

Please sign in to comment.