diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 03bbdd98387679..0a5fbba2df6666 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -2205,15 +2205,6 @@ impl AccountsDb {
         reclaim_result
     }
 
-    fn do_reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) {
-        let mut measure = Measure::start("reset");
-        self.accounts_index.reset_uncleaned_roots(max_clean_root);
-        measure.stop();
-        self.clean_accounts_stats
-            .reset_uncleaned_roots_us
-            .fetch_add(measure.as_us(), Ordering::Relaxed);
-    }
-
     /// increment store_counts to non-zero for all stores that can not be deleted.
     /// a store cannot be deleted if:
     /// 1. one of the pubkeys in the store has account info to a store whose store count is not going to zero
@@ -2546,8 +2537,6 @@ impl AccountsDb {
             slot_one_epoch_old.saturating_sub(acceptable_straggler_slot_count);
         let (old_storages, old_slots) = self.get_storages(..old_slot_cutoff);
         let num_old_storages = old_storages.len();
-        self.accounts_index
-            .add_uncleaned_roots(old_slots.iter().copied());
         for (old_slot, old_storage) in std::iter::zip(old_slots, old_storages) {
             self.dirty_stores.entry(old_slot).or_insert(old_storage);
         }
@@ -2808,7 +2797,6 @@ impl AccountsDb {
         let num_candidates = Self::count_pubkeys(&candidates);
 
         let mut accounts_scan = Measure::start("accounts_scan");
-        let uncleaned_roots = self.accounts_index.clone_uncleaned_roots();
         let found_not_zero_accum = AtomicU64::new(0);
         let not_found_on_fork_accum = AtomicU64::new(0);
         let missing_accum = AtomicU64::new(0);
@@ -2873,22 +2861,6 @@ impl AccountsDb {
                                     purges_old_accounts_local += 1;
                                     useless = false;
                                 }
-                                // Note, this next if-block is only kept to maintain the
-                                // `uncleaned_roots_slot_list_1` stat.
-                                if uncleaned_roots.contains(slot) {
-                                    // Assertion enforced by `accounts_index.get()`, the latest slot
-                                    // will not be greater than the given `max_clean_root`
-                                    if let Some(max_clean_root_inclusive) =
-                                        max_clean_root_inclusive
-                                    {
-                                        assert!(slot <= &max_clean_root_inclusive);
-                                    }
-                                    if slot_list.len() == 1 {
-                                        self.clean_accounts_stats
-                                            .uncleaned_roots_slot_list_1
-                                            .fetch_add(1, Ordering::Relaxed);
-                                    }
-                                }
                             }
                             None => {
                                 // This pubkey is in the index but not in a root slot, so clean
@@ -2954,7 +2926,6 @@ impl AccountsDb {
         let mut clean_old_rooted = Measure::start("clean_old_roots");
         let (purged_account_slots, removed_accounts) =
             self.clean_accounts_older_than_root(&reclaims, &pubkeys_removed_from_accounts_index);
-        self.do_reset_uncleaned_roots(max_clean_root_inclusive);
         clean_old_rooted.stop();
 
         let mut store_counts_time = Measure::start("store_counts");
@@ -3132,14 +3103,6 @@ impl AccountsDb {
                 i64
             ),
             ("scan_missing", missing_accum.load(Ordering::Relaxed), i64),
-            ("uncleaned_roots_len", uncleaned_roots.len(), i64),
-            (
-                "uncleaned_roots_slot_list_1",
-                self.clean_accounts_stats
-                    .uncleaned_roots_slot_list_1
-                    .swap(0, Ordering::Relaxed),
-                i64
-            ),
             (
                 "get_account_sizes_us",
                 self.clean_accounts_stats
@@ -3168,13 +3131,6 @@ impl AccountsDb {
                     .swap(0, Ordering::Relaxed),
                 i64
             ),
-            (
-                "reset_uncleaned_roots_us",
-                self.clean_accounts_stats
-                    .reset_uncleaned_roots_us
-                    .swap(0, Ordering::Relaxed),
-                i64
-            ),
             (
                 "remove_dead_accounts_remove_us",
                 self.clean_accounts_stats
@@ -3888,7 +3844,6 @@ impl AccountsDb {
 
             if store.num_zero_lamport_single_ref_accounts() == store.count() {
                 // all accounts in this storage can be dead
-                self.accounts_index.add_uncleaned_roots([slot]);
                 self.dirty_stores.entry(slot).or_insert(store);
                 self.shrink_stats
                     .num_dead_slots_added_to_clean
@@ -3913,7 +3868,7 @@ impl AccountsDb {
     }
 
     /// Shrinks `store` by rewriting the alive accounts to a new storage
-    fn shrink_storage(&self, store: &AccountStorageEntry) {
+    fn shrink_storage(&self, store: Arc<AccountStorageEntry>) {
         let slot = store.slot();
         if self.accounts_cache.contains(slot) {
             // It is not correct to shrink a slot while it is in the write cache until flush is complete and the slot is removed from the write cache.
@@ -3930,10 +3885,10 @@ impl AccountsDb {
             return;
         }
         let unique_accounts =
-            self.get_unique_accounts_from_storage_for_shrink(store, &self.shrink_stats);
+            self.get_unique_accounts_from_storage_for_shrink(&store, &self.shrink_stats);
         debug!("do_shrink_slot_store: slot: {}", slot);
         let shrink_collect =
-            self.shrink_collect::<AliveAccounts<'_>>(store, &unique_accounts, &self.shrink_stats);
+            self.shrink_collect::<AliveAccounts<'_>>(&store, &unique_accounts, &self.shrink_stats);
 
         // This shouldn't happen if alive_bytes is accurate.
         // However, it is possible that the remaining alive bytes could be 0. In that case, the whole slot should be marked dead by clean.
@@ -3944,7 +3899,7 @@ impl AccountsDb {
         {
             if shrink_collect.alive_total_bytes == 0 {
                 // clean needs to take care of this dead slot
-                self.accounts_index.add_uncleaned_roots([slot]);
+                self.dirty_stores.insert(slot, store.clone());
             }
 
             if !shrink_collect.all_are_zero_lamports {
@@ -4117,7 +4072,7 @@ impl AccountsDb {
             .get_slot_storage_entry_shrinking_in_progress_ok(slot)
         {
             if Self::is_shrinking_productive(&store) {
-                self.shrink_storage(&store)
+                self.shrink_storage(store)
             }
         }
     }
@@ -4678,7 +4633,7 @@ impl AccountsDb {
                         .num_ancient_slots_shrunk
                         .fetch_add(1, Ordering::Relaxed);
                 }
-                self.shrink_storage(&slot_shrink_candidate);
+                self.shrink_storage(slot_shrink_candidate);
             });
         })
     });
@@ -6327,7 +6282,6 @@ impl AccountsDb {
         // Only add to the uncleaned roots set *after* we've flushed the previous roots,
         // so that clean will actually be able to clean the slots.
         let num_new_roots = cached_roots.len();
-        self.accounts_index.add_uncleaned_roots(cached_roots);
 
         (num_new_roots, num_roots_flushed, flush_stats)
     }
@@ -8936,8 +8890,6 @@ impl AccountsDb {
                 timings.slots_to_clean = uncleaned_roots.len() as u64;
                 timings.num_duplicate_accounts = num_duplicate_accounts;
 
-                self.accounts_index
-                    .add_uncleaned_roots(uncleaned_roots.into_iter());
                 accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed);
                 if let Some(duplicates_lt_hash) = duplicates_lt_hash {
                     let old_val = outer_duplicates_lt_hash.replace(duplicates_lt_hash);
@@ -8956,7 +8908,6 @@ impl AccountsDb {
             );
             for (slot, storage) in all_zero_slots_to_clean {
                 self.dirty_stores.insert(slot, storage);
-                self.accounts_index.add_uncleaned_roots([slot]);
             }
         }
 
@@ -9290,6 +9241,12 @@ impl AccountStorageEntry {
 // These functions/fields are only usable from a dev context (i.e. tests and benches)
 #[cfg(feature = "dev-context-only-utils")]
 impl AccountsDb {
+    /// Return the number of slots marked with uncleaned pubkeys.
+    /// This is useful for testing clean algorithms.
+    pub fn get_len_of_slots_with_uncleaned_pubkeys(&self) -> usize {
+        self.uncleaned_pubkeys.len()
+    }
+
     /// useful to adapt tests written prior to introduction of the write cache
     /// to use the write cache
     pub fn add_root_and_flush_write_cache(&self, slot: Slot) {
diff --git a/accounts-db/src/accounts_db/stats.rs b/accounts-db/src/accounts_db/stats.rs
index d33ae9c1f63658..3af22b907224dc 100644
--- a/accounts-db/src/accounts_db/stats.rs
+++ b/accounts-db/src/accounts_db/stats.rs
@@ -275,11 +275,9 @@ pub struct CleanAccountsStats {
     // stats held here and reported by clean_accounts
     pub clean_old_root_us: AtomicU64,
    pub clean_old_root_reclaim_us: AtomicU64,
-    pub reset_uncleaned_roots_us: AtomicU64,
     pub remove_dead_accounts_remove_us: AtomicU64,
     pub remove_dead_accounts_shrink_us: AtomicU64,
     pub clean_stored_dead_slots_us: AtomicU64,
-    pub uncleaned_roots_slot_list_1: AtomicU64,
     pub get_account_sizes_us: AtomicU64,
     pub slots_cleaned: AtomicU64,
 }
diff --git a/accounts-db/src/accounts_db/tests.rs b/accounts-db/src/accounts_db/tests.rs
index 2f810b9598c6a8..98ff071dc5b45e 100644
--- a/accounts-db/src/accounts_db/tests.rs
+++ b/accounts-db/src/accounts_db/tests.rs
@@ -1976,43 +1976,6 @@ fn test_clean_max_slot_zero_lamport_account() {
     assert!(!accounts.accounts_index.contains_with(&pubkey, None, None));
 }
 
-#[test]
-fn test_uncleaned_roots_with_account() {
-    solana_logger::setup();
-
-    let accounts = AccountsDb::new_single_for_tests();
-    let pubkey = solana_sdk::pubkey::new_rand();
-    let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
-    //store an account
-    accounts.store_for_tests(0, &[(&pubkey, &account)]);
-    assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
-
-    // simulate slots are rooted after while
-    accounts.add_root_and_flush_write_cache(0);
-    assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
-
-    //now uncleaned roots are cleaned up
-    accounts.clean_accounts_for_tests();
-    assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
-}
-
-#[test]
-fn test_uncleaned_roots_with_no_account() {
-    solana_logger::setup();
-
-    let accounts = AccountsDb::new_single_for_tests();
-
-    assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
-
-    // simulate slots are rooted after while
-    accounts.add_root_and_flush_write_cache(0);
-    assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
-
-    //now uncleaned roots are cleaned up
-    accounts.clean_accounts_for_tests();
-    assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
-}
-
 fn assert_no_stores(accounts: &AccountsDb, slot: Slot) {
     let store = accounts.storage.get_slot_storage_entry(slot);
     assert!(store.is_none());
@@ -4358,13 +4321,6 @@ fn test_accounts_db_cache_clean_dead_slots() {
     // If no `max_clean_root` is specified, cleaning should purge all flushed slots
     accounts_db.flush_accounts_cache(true, None);
     assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
-    let mut uncleaned_roots = accounts_db
-        .accounts_index
-        .clear_uncleaned_roots(None)
-        .into_iter()
-        .collect::<Vec<_>>();
-    uncleaned_roots.sort_unstable();
-    assert_eq!(uncleaned_roots, slots);
     assert_eq!(
         accounts_db.accounts_cache.fetch_max_flush_root(),
         alive_slot,
     )
@@ -4412,13 +4368,6 @@ fn test_accounts_db_cache_clean() {
     // If no `max_clean_root` is specified, cleaning should purge all flushed slots
     accounts_db.flush_accounts_cache(true, None);
     assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
-    let mut uncleaned_roots = accounts_db
-        .accounts_index
-        .clear_uncleaned_roots(None)
-        .into_iter()
-        .collect::<Vec<_>>();
-    uncleaned_roots.sort_unstable();
-    assert_eq!(uncleaned_roots, slots);
     assert_eq!(
         accounts_db.accounts_cache.fetch_max_flush_root(),
         *slots.last().unwrap()
     )
@@ -4469,13 +4418,6 @@ fn run_test_accounts_db_cache_clean_max_root(
         assert_eq!(accounts_db.accounts_cache.num_slots(), 0,);
     }
 
-    let mut uncleaned_roots = accounts_db
-        .accounts_index
-        .clear_uncleaned_roots(None)
-        .into_iter()
-        .collect::<Vec<_>>();
-    uncleaned_roots.sort_unstable();
-
     let expected_max_flushed_root = if !is_cache_at_limit {
         // Should flush all slots between 0..=requested_flush_root
         requested_flush_root
@@ -4484,10 +4426,6 @@ fn run_test_accounts_db_cache_clean_max_root(
         num_slots as Slot - 1
     };
 
-    assert_eq!(
-        uncleaned_roots,
-        slots[0..=expected_max_flushed_root as usize].to_vec()
-    );
     assert_eq!(
         accounts_db.accounts_cache.fetch_max_flush_root(),
         expected_max_flushed_root,
     )
@@ -4865,7 +4803,7 @@ fn test_shrink_unref_handle_zero_lamport_single_ref_accounts() {
     // And now, slot 1 should be marked complete dead, which will be added
     // to uncleaned slots, which handle dropping dead storage. And it WON'T
     // be participating shrinking in the next round.
-    assert!(db.accounts_index.clone_uncleaned_roots().contains(&1));
+    assert!(db.dirty_stores.contains_key(&1));
     assert!(!db.shrink_candidate_slots.lock().unwrap().contains(&1));
 
     // Now, make slot 0 dead by updating the remaining key
@@ -7795,11 +7733,8 @@ fn test_handle_dropped_roots_for_ancient() {
     let slot0 = 0;
     let dropped_roots = vec![slot0];
     db.accounts_index.add_root(slot0);
-    db.accounts_index.add_uncleaned_roots([slot0]);
-    assert!(db.accounts_index.is_uncleaned_root(slot0));
     assert!(db.accounts_index.is_alive_root(slot0));
     db.handle_dropped_roots_for_ancient(dropped_roots.into_iter());
-    assert!(!db.accounts_index.is_uncleaned_root(slot0));
     assert!(!db.accounts_index.is_alive_root(slot0));
 }
 
diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs
index 2aae2d80a21553..dd6a8a9c47de1d 100644
--- a/accounts-db/src/accounts_index.rs
+++ b/accounts-db/src/accounts_index.rs
@@ -18,7 +18,6 @@ use {
         ThreadPool,
     },
     solana_measure::measure::Measure,
-    solana_nohash_hasher::IntSet,
     solana_sdk::{
         account::ReadableAccount,
         clock::{BankId, Slot},
@@ -441,7 +440,6 @@ pub struct RootsTracker {
     /// Updated every time we add a new root or clean/shrink an append vec into irrelevancy.
     /// Range is approximately the last N slots where N is # slots per epoch.
     pub alive_roots: RollingBitField,
-    uncleaned_roots: IntSet<Slot>,
 }
 
 impl Default for RootsTracker {
@@ -457,7 +455,6 @@ impl RootsTracker {
     pub fn new(max_width: u64) -> Self {
         Self {
             alive_roots: RollingBitField::new(max_width),
-            uncleaned_roots: IntSet::default(),
         }
     }
 
@@ -2013,24 +2010,6 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         w_roots_tracker.alive_roots.insert(slot);
     }
 
-    pub fn add_uncleaned_roots<I>(&self, roots: I)
-    where
-        I: IntoIterator<Item = Slot>,
-    {
-        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
-        w_roots_tracker.uncleaned_roots.extend(roots);
-    }
-
-    /// Removes `root` from `uncleaned_roots` and returns whether it was previously present
-    #[cfg(feature = "dev-context-only-utils")]
-    pub fn remove_uncleaned_root(&self, root: Slot) -> bool {
-        self.roots_tracker
-            .write()
-            .unwrap()
-            .uncleaned_roots
-            .remove(&root)
-    }
-
     pub fn max_root_inclusive(&self) -> Slot {
         self.roots_tracker
             .read()
             .unwrap()
@@ -2044,12 +2023,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
     /// return true if slot was a root
     pub fn clean_dead_slot(&self, slot: Slot) -> bool {
         let mut w_roots_tracker = self.roots_tracker.write().unwrap();
-        let removed_from_unclean_roots = w_roots_tracker.uncleaned_roots.remove(&slot);
         if !w_roots_tracker.alive_roots.remove(&slot) {
-            if removed_from_unclean_roots {
-                error!("clean_dead_slot-removed_from_unclean_roots: {}", slot);
-                inc_new_counter_error!("clean_dead_slot-removed_from_unclean_roots", 1, 1);
-            }
             false
         } else {
             drop(w_roots_tracker);
@@ -2061,7 +2035,6 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
     pub(crate) fn update_roots_stats(&self, stats: &mut AccountsIndexRootsStats) {
         let roots_tracker = self.roots_tracker.read().unwrap();
         stats.roots_len = Some(roots_tracker.alive_roots.len());
-        stats.uncleaned_roots_len = Some(roots_tracker.uncleaned_roots.len());
         stats.roots_range = Some(roots_tracker.alive_roots.range_width());
     }
 
@@ -2069,17 +2042,6 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         self.roots_tracker.read().unwrap().min_alive_root()
     }
 
-    pub(crate) fn reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) {
-        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
-        w_roots_tracker.uncleaned_roots.retain(|root| {
-            let is_cleaned = max_clean_root
-                .map(|max_clean_root| *root <= max_clean_root)
-                .unwrap_or(true);
-            // Only keep the slots that have yet to be cleaned
-            !is_cleaned
-        });
-    }
-
     pub fn num_alive_roots(&self) -> usize {
         self.roots_tracker.read().unwrap().alive_roots.len()
     }
@@ -2089,14 +2051,6 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> {
         tracker.alive_roots.get_all()
     }
 
-    pub fn clone_uncleaned_roots(&self) -> IntSet<Slot> {
-        self.roots_tracker.read().unwrap().uncleaned_roots.clone()
-    }
-
-    pub fn uncleaned_roots_len(&self) -> usize {
-        self.roots_tracker.read().unwrap().uncleaned_roots.len()
-    }
-
     // These functions/fields are only usable from a dev context (i.e. tests and benches)
     #[cfg(feature = "dev-context-only-utils")]
     // filter any rooted entries and return them along with a bool that indicates
@@ -3141,34 +3095,6 @@ pub mod tests {
         assert!(index.is_alive_root(0));
     }
 
-    #[test]
-    fn test_clean_and_unclean_slot() {
-        let index = AccountsIndex::<bool, bool>::default_for_tests();
-        assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-        index.add_root(0);
-        index.add_root(1);
-        index.add_uncleaned_roots([0, 1]);
-        assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-
-        index.reset_uncleaned_roots(None);
-        assert_eq!(2, index.roots_tracker.read().unwrap().alive_roots.len());
-        assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-
-        index.add_root(2);
-        index.add_root(3);
-        index.add_uncleaned_roots([2, 3]);
-        assert_eq!(4, index.roots_tracker.read().unwrap().alive_roots.len());
-        assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-
-        index.clean_dead_slot(1);
-        assert_eq!(3, index.roots_tracker.read().unwrap().alive_roots.len());
-        assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-
-        index.clean_dead_slot(2);
-        assert_eq!(2, index.roots_tracker.read().unwrap().alive_roots.len());
-        assert_eq!(1, index.roots_tracker.read().unwrap().uncleaned_roots.len());
-    }
-
     #[test]
     fn test_update_last_wins() {
         let key = solana_sdk::pubkey::new_rand();
@@ -4065,30 +3991,6 @@ pub mod tests {
         assert!(gc.is_empty());
     }
 
-    pub fn clear_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> {
-        let mut cleaned_roots = HashSet::new();
-        let mut w_roots_tracker = self.roots_tracker.write().unwrap();
-        w_roots_tracker.uncleaned_roots.retain(|root| {
-            let is_cleaned = max_clean_root
-                .map(|max_clean_root| *root <= max_clean_root)
-                .unwrap_or(true);
-            if is_cleaned {
-                cleaned_roots.insert(*root);
-            }
-            // Only keep the slots that have yet to be cleaned
-            !is_cleaned
-        });
-        cleaned_roots
-    }
-
-    pub(crate) fn is_uncleaned_root(&self, slot: Slot) -> bool {
-        self.roots_tracker
-            .read()
-            .unwrap()
-            .uncleaned_roots
-            .contains(&slot)
-    }
-
     pub fn clear_roots(&self) {
         self.roots_tracker.write().unwrap().alive_roots.clear()
     }
diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs
index 1cb8c7ab4b5a22..cc921aee0eedec 100644
--- a/runtime/src/bank/tests.rs
+++ b/runtime/src/bank/tests.rs
@@ -13525,3 +13525,60 @@ fn test_rehash_bad() {
     // let the show begin
     bank.rehash();
 }
+
+/// Test that when a bank freezes, it populates `uncleaned_pubkeys` for the slot
+/// to clean. After clean, the slot is removed from `uncleaned_pubkeys` again.
+#[test]
+fn test_populate_uncleaned_slot_for_bank() {
+    let ten_sol = 10 * LAMPORTS_PER_SOL;
+    let bank = create_simple_test_bank(100);
+    let account = AccountSharedData::new(ten_sol, 0, &Pubkey::default());
+    let pubkey = Pubkey::new_unique();
+    bank.store_account_and_update_capitalization(&pubkey, &account);
+    bank.freeze();
+
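+    // Per this test's doc comment: freezing the bank is expected to queue this
+    // bank's slot in `uncleaned_pubkeys`, and the clean pass below should drain it.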
+    assert_eq!(
+        bank.rc
+            .accounts
+            .accounts_db
+            .get_len_of_slots_with_uncleaned_pubkeys(),
+        1
+    );
+
+    bank.clean_accounts();
+    assert_eq!(
+        bank.rc
+            .accounts
+            .accounts_db
+            .get_len_of_slots_with_uncleaned_pubkeys(),
+        0
+    );
+}
+
+/// This test is similar to `test_populate_uncleaned_slot_for_bank`, but it
+/// covers the case where no accounts are stored in the bank. In theory, we
+/// could optimize and skip queueing the slot for cleaning. The current code
+/// does not check whether the bank stored any accounts, so it queues the slot
+/// for cleaning even when it is empty. This test makes sure that behavior stays consistent.
+#[test]
+fn test_populate_uncleaned_slot_for_bank_with_empty_accounts() {
+    let bank = create_simple_test_bank(100);
+    bank.freeze();
+
+    assert_eq!(
+        bank.rc
+            .accounts
+            .accounts_db
+            .get_len_of_slots_with_uncleaned_pubkeys(),
+        1
+    );
+
+    bank.clean_accounts();
+    assert_eq!(
+        bank.rc
+            .accounts
+            .accounts_db
+            .get_len_of_slots_with_uncleaned_pubkeys(),
+        0
+    );
+}