diff --git a/Cargo.lock b/Cargo.lock index 23c429edd446de..a733f4986ca348 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3684,6 +3684,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" +[[package]] +name = "matrixmultiply" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a" +dependencies = [ + "autocfg", + "rawpointer", +] + [[package]] name = "memchr" version = "2.6.3" @@ -3849,6 +3859,21 @@ dependencies = [ "tempfile", ] +[[package]] +name = "ndarray" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "882ed72dce9365842bf196bdeedf5055305f11fc8c03dee7bb0194a6cad34841" +dependencies = [ + "matrixmultiply", + "num-complex 0.4.6", + "num-integer", + "num-traits", + "portable-atomic", + "portable-atomic-util", + "rawpointer", +] + [[package]] name = "net2" version = "0.2.37" @@ -3909,7 +3934,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8536030f9fea7127f841b45bb6243b27255787fb4eb83958aa1ef9d2fdc0c36" dependencies = [ "num-bigint 0.2.6", - "num-complex", + "num-complex 0.2.4", "num-integer", "num-iter", "num-rational", @@ -3947,6 +3972,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + [[package]] name = "num-derive" version = "0.4.2" @@ -4441,6 +4475,15 @@ version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + [[package]] name = "ppv-lite86" version = "0.2.15" @@ -4887,6 +4930,12 @@ dependencies = [ "bitflags 2.6.0", ] +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + [[package]] name = "rayon" version = "1.10.0" @@ -5948,6 +5997,7 @@ dependencies = [ "memmap2", "memoffset 0.9.1", "modular-bitfield", + "ndarray", "num_cpus", "num_enum", "qualifier_attr", diff --git a/Cargo.toml b/Cargo.toml index ac186c11930cf3..4fb0dfb4cf9212 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -375,6 +375,7 @@ merlin = "3" min-max-heap = "1.3.0" mockall = "0.11.4" modular-bitfield = "0.11.2" +ndarray = "0.16.1" nix = "0.29.0" num-bigint = "0.4.6" num-derive = "0.4" diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index 82a983ede37a20..24a104aa0b4266 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -67,6 +67,7 @@ assert_matches = { workspace = true } criterion = { workspace = true } libsecp256k1 = { workspace = true } memoffset = { workspace = true } +ndarray = { workspace = true } rand_chacha = { workspace = true } serde_bytes = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` @@ -103,6 +104,10 @@ harness = false name = "bench_hashing" harness = false +[[bench]] +name = "read_only_accounts_cache" +harness = false + [[bench]] name = "bench_serde" harness = false diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 6fe87523cf18f1..01efd6252ccf52 100644 --- a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -1,11 +1,9 @@ #![allow(clippy::arithmetic_side_effects)] use { criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, 
Throughput}, - rand::{distributions::WeightedIndex, prelude::*}, - rand_chacha::ChaChaRng, solana_accounts_db::{ accounts_file::StorageAccess, - append_vec::{self, AppendVec, SCAN_BUFFER_SIZE_WITHOUT_DATA}, + append_vec::{self, AppendVec}, tiered_storage::{ file::TieredReadableFile, hot::{HotStorageReader, HotStorageWriter}, @@ -15,13 +13,13 @@ use { account::{AccountSharedData, ReadableAccount}, clock::Slot, pubkey::Pubkey, - rent::Rent, rent_collector::RENT_EXEMPT_RENT_EPOCH, - system_instruction::MAX_PERMITTED_DATA_LENGTH, }, - std::{iter, mem::ManuallyDrop}, + std::mem::ManuallyDrop, }; +mod utils; + const ACCOUNTS_COUNTS: [usize; 4] = [ 1, // the smallest count; will bench overhead 100, // number of accounts written per slot on mnb (with *no* rent rewrites) @@ -102,54 +100,17 @@ fn bench_scan_pubkeys(c: &mut Criterion) { let mut group = c.benchmark_group("scan_pubkeys"); let temp_dir = tempfile::tempdir().unwrap(); - // distribution of account data sizes to use when creating accounts - // 3% of accounts have no data - // 75% of accounts are 165 bytes (a token account) - // 20% of accounts are 200 bytes (a stake account) - // 1% of accounts are 256 kibibytes (pathological case for the scan buffer) - // 1% of accounts are 10 mebibytes (the max size for an account) - let data_sizes = [ - 0, - 165, - 200, - SCAN_BUFFER_SIZE_WITHOUT_DATA, - MAX_PERMITTED_DATA_LENGTH as usize, - ]; - let weights = [3, 75, 20, 1, 1]; - let distribution = WeightedIndex::new(weights).unwrap(); - - let rent = Rent::default(); - let rent_minimum_balances: Vec<_> = data_sizes - .iter() - .map(|data_size| rent.minimum_balance(*data_size)) - .collect(); - for accounts_count in ACCOUNTS_COUNTS { group.throughput(Throughput::Elements(accounts_count as u64)); - let mut rng = ChaChaRng::seed_from_u64(accounts_count as u64); - let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique) - .take(accounts_count) - .collect(); - let accounts: Vec<_> = iter::repeat_with(|| { - let index = 
distribution.sample(&mut rng); - AccountSharedData::new_rent_epoch( - rent_minimum_balances[index], - data_sizes[index], - &Pubkey::default(), - RENT_EXEMPT_RENT_EPOCH, - ) - }) - .take(pubkeys.len()) - .collect(); - let storable_accounts: Vec<_> = iter::zip(&pubkeys, &accounts).collect(); + let storable_accounts: Vec<_> = utils::accounts(255).take(accounts_count).collect(); // create an append vec file let append_vec_path = temp_dir.path().join(format!("append_vec_{accounts_count}")); _ = std::fs::remove_file(&append_vec_path); - let file_size = accounts + let file_size = storable_accounts .iter() - .map(|account| append_vec::aligned_stored_size(account.data().len())) + .map(|(_, account)| append_vec::aligned_stored_size(account.data().len())) .sum(); let append_vec = AppendVec::new(append_vec_path, true, file_size); let stored_accounts_info = append_vec diff --git a/accounts-db/benches/read_only_accounts_cache.rs b/accounts-db/benches/read_only_accounts_cache.rs new file mode 100644 index 00000000000000..f82d9c38249e39 --- /dev/null +++ b/accounts-db/benches/read_only_accounts_cache.rs @@ -0,0 +1,287 @@ +#![feature(test)] + +extern crate test; + +use { + criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}, + rand::{rngs::SmallRng, seq::SliceRandom, SeedableRng}, + solana_accounts_db::{ + accounts_db::AccountsDb, read_only_accounts_cache::ReadOnlyAccountsCache, + }, + std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + thread::Builder, + time::{Duration, Instant}, + }, +}; +mod utils; + +/// Numbers of reader and writer threads to bench. +const NUM_READERS_WRITERS: &[usize] = &[ + 8, + 16, + // These parameters are likely to freeze your computer, if it has less than + // 32 cores. + // 32, 64, 128, 256, 512, 1024, +]; + +fn bench_read_only_accounts_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("read_only_accounts_cache"); + let slot = 0; + + // Prepare initial accounts, but make sure to not fill up the cache. 
+ + let accounts: Vec<_> = utils::accounts_with_size_limit( + 255, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO / 2, + ) + .collect(); + let pubkeys: Vec<_> = accounts + .iter() + .map(|(pubkey, _)| pubkey.to_owned()) + .collect(); + + for num_readers_writers in NUM_READERS_WRITERS { + let cache = Arc::new(ReadOnlyAccountsCache::new( + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, + AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, + )); + + for (pubkey, account) in accounts.iter() { + cache.store(*pubkey, slot, account.clone()); + } + + // Spawn the reader threads in the background. They are reading the + // initially inserted accounts. + let stop_threads = Arc::new(AtomicBool::new(false)); + let reader_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let pubkeys = pubkeys.clone(); + + Builder::new() + .name(format!("reader{i:02}")) + .spawn({ + move || { + // Continuously read random accounts. + let mut rng = SmallRng::seed_from_u64(i as u64); + while !stop_threads.load(Ordering::Relaxed) { + let pubkey = pubkeys.choose(&mut rng).unwrap(); + test::black_box(cache.load(*pubkey, slot)); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + // Spawn the writer threads in the background. + let slot = 1; + let writer_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let accounts = accounts.clone(); + + Builder::new() + .name(format!("writer{i:02}")) + .spawn({ + move || { + // Continuously write to already existing pubkeys.
+ let mut rng = SmallRng::seed_from_u64(100_u64.saturating_add(i as u64)); + while !stop_threads.load(Ordering::Relaxed) { + let (pubkey, account) = accounts.choose(&mut rng).unwrap(); + cache.store(*pubkey, slot, account.clone()); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + group.bench_function( + BenchmarkId::new("read_only_accounts_cache_store", num_readers_writers), + |b| { + b.iter_custom(|iters| { + let mut total_time = Duration::new(0, 0); + + for (pubkey, account) in accounts.iter().cycle().take(iters as usize) { + // Measure only stores. + let start = Instant::now(); + cache.store(*pubkey, slot, account.clone()); + total_time = total_time.saturating_add(start.elapsed()); + } + total_time + }) + }, + ); + group.bench_function( + BenchmarkId::new("read_only_accounts_cache_load", num_readers_writers), + |b| { + b.iter_custom(|iters| { + let start = Instant::now(); + for (pubkey, _) in accounts.iter().cycle().take(iters as usize) { + test::black_box(cache.load(*pubkey, slot)); + } + + start.elapsed() + }) + }, + ); + + stop_threads.store(true, Ordering::Relaxed); + for reader_handle in reader_handles { + reader_handle.join().unwrap(); + } + for writer_handle in writer_handles { + writer_handle.join().unwrap(); + } + } +} + +/// Benchmarks the read-only cache eviction mechanism. It does so by performing +/// multithreaded reads and writes on a full cache. Each write triggers +/// eviction. Background reads add more contention. +fn bench_read_only_accounts_cache_eviction(c: &mut Criterion) { + // Prepare initial accounts, enough of them to fill up the cache.
+ + let accounts: Vec<_> = utils::accounts_with_size_limit( + 255, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI * 10, + ) + .collect(); + let pubkeys: Vec<_> = accounts + .iter() + .map(|(pubkey, _)| pubkey.to_owned()) + .collect(); + + let mut group = c.benchmark_group("read_only_accounts_cache_eviction"); + + for num_readers_writers in NUM_READERS_WRITERS { + let cache = Arc::new(ReadOnlyAccountsCache::new( + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO, + AccountsDb::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI, + AccountsDb::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE, + )); + + // Fill up the cache. + let slot = 0; + for (pubkey, account) in accounts.iter() { + cache.store(*pubkey, slot, account.clone()); + } + + // Spawn the reader threads in the background. They are reading the + // initially inserted accounts. + let stop_threads = Arc::new(AtomicBool::new(false)); + let reader_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let pubkeys = pubkeys.clone(); + + Builder::new() + .name(format!("reader{i:02}")) + .spawn({ + move || { + // Continuously read random accounts. + let mut rng = SmallRng::seed_from_u64(i as u64); + while !stop_threads.load(Ordering::Relaxed) { + let pubkey = pubkeys.choose(&mut rng).unwrap(); + test::black_box(cache.load(*pubkey, slot)); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + // Spawn the writer threads in the background. Prepare the accounts + // with the same public keys and sizes as the initial ones. The + // intention is a constant overwrite in background for additional + // contention. + let slot = 1; + let writer_handles = (0..*num_readers_writers) + .map(|i| { + let stop_threads = Arc::clone(&stop_threads); + let cache = Arc::clone(&cache); + let accounts = accounts.clone(); + + Builder::new() + .name(format!("writer{i:02}")) + .spawn({ + move || { + // Continuously write to already existing pubkeys.
+ let mut rng = SmallRng::seed_from_u64(100_u64.saturating_add(i as u64)); + while !stop_threads.load(Ordering::Relaxed) { + let (pubkey, account) = accounts.choose(&mut rng).unwrap(); + cache.store(*pubkey, slot, account.clone()); + } + } + }) + .unwrap() + }) + .collect::<Vec<_>>(); + + // Benchmark the performance of loading and storing accounts in a + // cache that is fully populated. This triggers eviction for each + // write operation. Background threads introduce contention. + group.bench_function( + BenchmarkId::new( + "read_only_accounts_cache_eviction_load", + num_readers_writers, + ), + |b| { + b.iter_custom(|iters| { + let mut rng = SmallRng::seed_from_u64(1); + let mut total_time = Duration::new(0, 0); + + for _ in 0..iters { + let pubkey = pubkeys.choose(&mut rng).unwrap().to_owned(); + + let start = Instant::now(); + test::black_box(cache.load(pubkey, slot)); + total_time = total_time.saturating_add(start.elapsed()); + } + + total_time + }) + }, + ); + group.bench_function( + BenchmarkId::new( + "read_only_accounts_cache_eviction_store", + num_readers_writers, + ), + |b| { + b.iter_custom(|iters| { + let accounts = utils::accounts(0).take(iters as usize); + + let start = Instant::now(); + for (pubkey, account) in accounts { + cache.store(pubkey, slot, account); + } + + start.elapsed() + }) + }, + ); + + stop_threads.store(true, Ordering::Relaxed); + for reader_handle in reader_handles { + reader_handle.join().unwrap(); + } + for writer_handle in writer_handles { + writer_handle.join().unwrap(); + } + } +} + +criterion_group!( + benches, + bench_read_only_accounts_cache, + bench_read_only_accounts_cache_eviction +); +criterion_main!(benches); diff --git a/accounts-db/benches/utils.rs b/accounts-db/benches/utils.rs new file mode 100644 index 00000000000000..8d23e839d8045a --- /dev/null +++ b/accounts-db/benches/utils.rs @@ -0,0 +1,91 @@ +use { + rand::{ + distributions::{Distribution, WeightedIndex}, + Rng, SeedableRng, + }, + rand_chacha::ChaChaRng, +
solana_accounts_db::append_vec::SCAN_BUFFER_SIZE_WITHOUT_DATA, + solana_sdk::{ + account::AccountSharedData, pubkey::Pubkey, rent::Rent, + rent_collector::RENT_EXEMPT_RENT_EPOCH, system_instruction::MAX_PERMITTED_DATA_LENGTH, + }, + std::iter, +}; + +/// Sizes of accounts. +/// +/// - No data. +/// - 165 bytes (a token account). +/// - 200 bytes (a stake account). +/// - 256 kibibytes (pathological case for the scan buffer). +/// - 10 mebibytes (the max size for an account). +const DATA_SIZES: &[usize] = &[ + 0, + 165, + 200, + SCAN_BUFFER_SIZE_WITHOUT_DATA, + MAX_PERMITTED_DATA_LENGTH as usize, +]; +/// Distribution of the account sizes: +/// +/// - 3% of accounts have no data. +/// - 75% of accounts are 165 bytes (a token account). +/// - 20% of accounts are 200 bytes (a stake account). +/// - 1% of accounts are 256 kibibytes (pathological case for the scan buffer). +/// - 1% of accounts are 10 mebibytes (the max size for an account). +const WEIGHTS: &[usize] = &[3, 75, 20, 1, 1]; + +/// Returns an iterator with storable accounts. 
+pub fn accounts(seed: u64) -> impl Iterator<Item = (Pubkey, AccountSharedData)> { + let distribution = WeightedIndex::new(WEIGHTS).unwrap(); + let mut rng = ChaChaRng::seed_from_u64(seed); + let rent = Rent::default(); + + iter::repeat_with(move || { + let index = distribution.sample(&mut rng); + let data_size = DATA_SIZES[index]; + let owner: [u8; 32] = rng.gen(); + let owner = Pubkey::new_from_array(owner); + ( + owner, + AccountSharedData::new_rent_epoch( + rent.minimum_balance(data_size), + data_size, + &owner, + RENT_EXEMPT_RENT_EPOCH, + ), + ) + }) +} + +#[allow(dead_code)] +pub fn accounts_with_size_limit( + seed: u64, + size_limit: usize, +) -> impl Iterator<Item = (Pubkey, AccountSharedData)> { + let distribution = WeightedIndex::new(WEIGHTS).unwrap(); + let mut rng = ChaChaRng::seed_from_u64(seed); + let rent = Rent::default(); + let mut sum = 0_usize; + + iter::from_fn(move || { + let index = distribution.sample(&mut rng); + let data_size = DATA_SIZES[index]; + sum = sum.saturating_add(data_size); + if sum >= size_limit { + None + } else { + let owner = Pubkey::new_unique(); + + Some(( + owner, + AccountSharedData::new_rent_epoch( + rent.minimum_balance(data_size), + data_size, + &owner, + RENT_EXEMPT_RENT_EPOCH, + ), + )) + } + }) +} diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 03bbdd98387679..e7bbec4189e531 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1889,11 +1889,14 @@ impl AccountsDb { pub const DEFAULT_ACCOUNTS_HASH_CACHE_DIR: &'static str = "accounts_hash_cache"; // read only cache does not update lru on read of an entry unless it has been at least this many ms since the last lru update + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE: u32 = 100; // The default high and low watermark sizes for the accounts read cache. // If the cache size exceeds MAX_SIZE_HI, it'll evict entries until the size is <= MAX_SIZE_LO.
+ #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO: usize = 400 * 1024 * 1024; + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI: usize = 410 * 1024 * 1024; pub fn default_for_tests() -> Self { diff --git a/accounts-db/src/bench_utils.rs b/accounts-db/src/bench_utils.rs new file mode 100644 index 00000000000000..8b137891791fe9 --- /dev/null +++ b/accounts-db/src/bench_utils.rs @@ -0,0 +1 @@ + diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 8e7b4faf926b75..27c41ccf27dcce 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -32,6 +32,9 @@ mod file_io; pub mod hardened_unpack; pub mod partitioned_rewards; pub mod pubkey_bins; +#[cfg(feature = "dev-context-only-utils")] +pub mod read_only_accounts_cache; +#[cfg(not(feature = "dev-context-only-utils"))] mod read_only_accounts_cache; mod rolling_bit_field; pub mod secondary_index; diff --git a/accounts-db/src/read_only_accounts_cache.rs b/accounts-db/src/read_only_accounts_cache.rs index 2431761bc5f535..a616a863535073 100644 --- a/accounts-db/src/read_only_accounts_cache.rs +++ b/accounts-db/src/read_only_accounts_cache.rs @@ -1,5 +1,7 @@ //! ReadOnlyAccountsCache used to store accounts, such as executable accounts, //! which can be large, loaded many times, and rarely change. 
+#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; use { dashmap::{mapref::entry::Entry, DashMap}, index_list::{Index, IndexList}, @@ -22,6 +24,7 @@ use { }, }; +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] const CACHE_ENTRY_SIZE: usize = std::mem::size_of::<ReadOnlyAccountCacheEntry>() + 2 * std::mem::size_of::<Index>(); @@ -65,6 +68,7 @@ struct AtomicReadOnlyCacheStats { evictor_wakeup_count_productive: AtomicU64, } +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] #[derive(Debug)] pub(crate) struct ReadOnlyAccountsCache { cache: Arc<DashMap<Pubkey, ReadOnlyAccountCacheEntry>>, @@ -93,6 +97,7 @@ pub(crate) struct ReadOnlyAccountsCache { } impl ReadOnlyAccountsCache { + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn new( max_data_size_lo: usize, max_data_size_hi: usize, @@ -137,6 +142,7 @@ impl ReadOnlyAccountsCache { } } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn load(&self, pubkey: Pubkey, slot: Slot) -> Option<AccountSharedData> { let (account, load_us) = measure_us!({ let mut found = None; @@ -175,6 +181,7 @@ impl ReadOnlyAccountsCache { CACHE_ENTRY_SIZE + account.data().len() } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn store(&self, pubkey: Pubkey, slot: Slot, account: AccountSharedData) { let measure_store = Measure::start(""); self.highest_slot_stored.fetch_max(slot, Ordering::Release); @@ -218,6 +225,7 @@ impl ReadOnlyAccountsCache { self.remove(pubkey) } + #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] pub(crate) fn remove(&self, pubkey: Pubkey) -> Option<AccountSharedData> { Self::do_remove(&pubkey, &self.cache, &self.queue, &self.data_size) } diff --git a/accounts-db/src/storable_accounts.rs b/accounts-db/src/storable_accounts.rs index 6304daf6002ba6..1843c53833d2c7 100644 --- a/accounts-db/src/storable_accounts.rs +++ b/accounts-db/src/storable_accounts.rs @@ -100,7 +100,8 @@ pub struct StorableAccountsCacher { /// abstract access to pubkey, account, slot, target_slot of either:
/// a. (slot, &[&Pubkey, &ReadableAccount]) -/// b. (slot, &[&Pubkey, &ReadableAccount, Slot]) (we will use this later) +/// b. (slot, &[Pubkey, ReadableAccount]) +/// c. (slot, &[&Pubkey, &ReadableAccount, Slot]) (we will use this later) /// This trait avoids having to allocate redundant data when there is a duplicated slot parameter. /// All legacy callers do not have a unique slot per account to store. pub trait StorableAccounts<'a>: Sync { @@ -165,6 +166,26 @@ impl<'a: 'b, 'b> StorableAccounts<'a> for (Slot, &'b [(&'a Pubkey, &'a AccountSh } } +impl<'a: 'b, 'b> StorableAccounts<'a> for (Slot, &'b [(Pubkey, AccountSharedData)]) { + fn account( + &self, + index: usize, + mut callback: impl for<'local> FnMut(AccountForStorage<'local>) -> Ret, + ) -> Ret { + callback((&self.1[index].0, &self.1[index].1).into()) + } + fn slot(&self, _index: usize) -> Slot { + // per-index slot is not unique per slot when per-account slot is not included in the source data + self.target_slot() + } + fn target_slot(&self) -> Slot { + self.0 + } + fn len(&self) -> usize { + self.1.len() + } +} + /// holds slices of accounts being moved FROM a common source slot to 'target_slot' pub struct StorableAccountsBySlot<'a> { target_slot: Slot,