From 7f3cbfeef929efd9b45abe34587f8298d4146879 Mon Sep 17 00:00:00 2001
From: jeff washington
Date: Fri, 30 Aug 2024 16:12:35 -0500
Subject: [PATCH 01/14] unref accounts in shink and pack when we're committed

---
 accounts-db/src/accounts_db.rs         | 74 ++++++++++++++------------
 accounts-db/src/ancient_append_vecs.rs | 43 ++++-----------
 2 files changed, 52 insertions(+), 65 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index aa98b65e3aba72..e5489c0360de6d 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -3967,7 +3967,6 @@ impl AccountsDb {
         self.accounts_index.scan(
             accounts.iter().map(|account| account.pubkey()),
             |pubkey, slots_refs, _entry| {
-                let mut result = AccountsIndexScanResult::OnlyKeepInMemoryIfDirty;
                 let stored_account = &accounts[index];
                 let mut do_populate_accounts_for_shrink = |ref_count, slot_list| {
                     if stored_account.is_zero_lamport()
@@ -4004,7 +4003,6 @@ impl AccountsDb {
                         // not exist in the re-written slot. Unref it to keep the index consistent with
                         // rewriting the storage entries.
                         unrefed_pubkeys.push(pubkey);
-                        result = AccountsIndexScanResult::Unref;
                         dead += 1;
                     } else {
                         do_populate_accounts_for_shrink(ref_count, slot_list);
@@ -4021,7 +4019,7 @@ impl AccountsDb {
                     do_populate_accounts_for_shrink(ref_count, &slot_list);
                 }
                 index += 1;
-                result
+                AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
             },
             None,
             false,
@@ -4309,6 +4307,35 @@ impl AccountsDb {
             .fetch_add(time.as_us(), Ordering::Relaxed);
     }
 
+    pub(crate) fn unref_shrunk_dead_accounts<'a>(
+        &self,
+        unrefed_pubkeys: impl Iterator,
+        slot: Slot,
+    ) {
+        self.accounts_index.scan(
+            unrefed_pubkeys,
+            |pubkey, slot_refs, _entry| {
+                // pubkeys in `unrefed_pubkeys` were unref'd in `shrink_collect` above under the assumption that we would shrink everything.
+                // Since shrink is not occurring, we need to addref the pubkeys to get the system back to the prior state since the account still exists at this slot.
+                if slot_refs.is_none() {
+                    // We also expect that the accounts index must contain an
+                    // entry for `pubkey`. Log a warning for now. In future,
+                    // we will panic when this happens.
+                    warn!("pubkey {pubkey} in slot {slot} was NOT found in accounts index during shrink");
+                    datapoint_warn!(
+                        "accounts_db-shink_pubkey_missing_from_index",
+                        ("store_slot", slot, i64),
+                        ("pubkey", pubkey.to_string(), String),
+                    )
+                }
+                AccountsIndexScanResult::Unref
+            },
+            None,
+            false,
+            ScanFilter::All,
+        );
+    }
+
     fn do_shrink_slot_store(&self, slot: Slot, store: &Arc) {
         if self.accounts_cache.contains(slot) {
             // It is not correct to shrink a slot while it is in the write cache until flush is complete and the slot is removed from the write cache.
@@ -4341,43 +4368,24 @@ impl AccountsDb {
                 // clean needs to take care of this dead slot
                 self.accounts_index.add_uncleaned_roots([slot]);
             }
-            info!(
-                "Unexpected shrink for slot {} alive {} capacity {}, \
-                likely caused by a bug for calculating alive bytes.",
-                slot, shrink_collect.alive_total_bytes, shrink_collect.capacity
-            );
+            if !shrink_collect.all_are_zero_lamports {
+                // if all are zero lamports, then we expect that we would like to mark the whole slot dead, but we cannot. That's clean's job.
+                info!(
+                    "Unexpected shrink for slot {} alive {} capacity {}, \
+                    likely caused by a bug for calculating alive bytes. All alive bytes are zero: {}, {}",
+                    slot, shrink_collect.alive_total_bytes, shrink_collect.capacity,
+                    store.alive_bytes(), shrink_collect.zero_lamport_single_ref_pubkeys.len() * aligned_stored_size(0),
+                );
+            }
+
             self.shrink_stats
                 .skipped_shrink
                 .fetch_add(1, Ordering::Relaxed);
-
-            self.accounts_index.scan(
-                shrink_collect.unrefed_pubkeys.into_iter(),
-                |pubkey, _slot_refs, entry| {
-                    // pubkeys in `unrefed_pubkeys` were unref'd in `shrink_collect` above under the assumption that we would shrink everything.
-                    // Since shrink is not occurring, we need to addref the pubkeys to get the system back to the prior state since the account still exists at this slot.
-                    if let Some(entry) = entry {
-                        entry.addref();
-                    } else {
-                        // We also expect that the accounts index must contain an
-                        // entry for `pubkey`. Log a warning for now. In future,
-                        // we will panic when this happens.
-                        warn!("pubkey {pubkey} in slot {slot} was NOT found in accounts index during shrink");
-                        datapoint_warn!(
-                            "accounts_db-shink_pubkey_missing_from_index",
-                            ("store_slot", slot, i64),
-                            ("pubkey", pubkey.to_string(), String),
-                        )
-                    }
-                    AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
-                },
-                None,
-                true,
-                ScanFilter::All,
-            );
             return;
         }
 
+        self.unref_shrunk_dead_accounts(shrink_collect.unrefed_pubkeys.iter().cloned(), slot);
+
         let total_accounts_after_shrink = shrink_collect.alive_accounts.len();
         debug!(
             "shrinking: slot: {}, accounts: ({} => {}) bytes: {} original: {}",
diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 24ec415b792a3f..0729de3b824fca 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -13,12 +13,11 @@ use {
             ShrinkCollectAliveSeparatedByRefs, ShrinkStatsSub,
         },
         accounts_file::AccountsFile,
-        accounts_index::{AccountsIndexScanResult, ScanFilter},
         active_stats::ActiveStatItem,
         storable_accounts::{StorableAccounts, StorableAccountsBySlot},
     },
     rand::{thread_rng, Rng},
-    rayon::prelude::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
+    rayon::prelude::{IntoParallelRefIterator, ParallelIterator},
     solana_measure::measure_us,
     solana_sdk::clock::Slot,
     std::{
@@ -440,7 +439,6 @@ impl AccountsDb {
                 accounts_to_combine.target_slots_sorted.last(),
                 many_refs_newest.last().map(|accounts| accounts.slot)
             );
-            self.addref_accounts_failed_to_shrink_ancient(accounts_to_combine.accounts_to_combine);
             return;
         }
 
@@ -468,12 +466,19 @@ impl AccountsDb {
         if pack.len() > accounts_to_combine.target_slots_sorted.len() {
             // Not enough slots to contain the accounts we are trying to pack.
-            // `shrink_collect` previously unref'd some accounts. We need to addref them
-            // to restore the correct state since we failed to combine anything.
-            self.addref_accounts_failed_to_shrink_ancient(accounts_to_combine.accounts_to_combine);
             return;
         }
 
+        accounts_to_combine
+            .accounts_to_combine
+            .iter()
+            .for_each(|combine| {
+                self.unref_shrunk_dead_accounts(
+                    combine.unrefed_pubkeys.iter().cloned(),
+                    combine.slot,
+                );
+            });
+
         let write_ancient_accounts = self.write_packed_storages(&accounts_to_combine, pack);
 
         self.finish_combine_ancient_slots_packed_internal(
@@ -483,29 +488,6 @@ impl AccountsDb {
         );
     }
 
-    /// for each account in `unrefed_pubkeys`, in each `accounts_to_combine`, addref
-    fn addref_accounts_failed_to_shrink_ancient<'a>(
-        &self,
-        accounts_to_combine: Vec>>,
-    ) {
-        self.thread_pool_clean.install(|| {
-            accounts_to_combine.into_par_iter().for_each(|combine| {
-                self.accounts_index.scan(
-                    combine.unrefed_pubkeys.into_iter(),
-                    |_pubkey, _slots_refs, entry| {
-                        if let Some(entry) = entry {
-                            entry.addref();
-                        }
-                        AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
-                    },
-                    None,
-                    true,
-                    ScanFilter::All,
-                );
-            });
-        });
-    }
-
     /// calculate all storage info for the storages in slots
     /// Then, apply 'tuning' to filter out slots we do NOT want to combine.
     fn collect_sort_filter_ancient_slots(
@@ -858,9 +840,6 @@ impl AccountsDb {
                 }
             }
             let unpackable_slots_count = remove.len();
-            remove.into_iter().rev().for_each(|i| {
-                self.addref_accounts_failed_to_shrink_ancient(vec![accounts_to_combine.remove(i)]);
-            });
             target_slots_sorted.sort_unstable();
             self.shrink_ancient_stats
                 .slots_cannot_move_count

From 007aaef02211a11b0eb6d9ff42b6001ebb9bba9b Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Tue, 3 Sep 2024 15:01:21 +0000
Subject: [PATCH 02/14] remove bad comments

---
 accounts-db/src/accounts_db.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index e5489c0360de6d..060cb31739e793 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -4316,7 +4316,6 @@ impl AccountsDb {
             unrefed_pubkeys,
             |pubkey, slot_refs, _entry| {
                 // pubkeys in `unrefed_pubkeys` were unref'd in `shrink_collect` above under the assumption that we would shrink everything.
-                // Since shrink is not occurring, we need to addref the pubkeys to get the system back to the prior state since the account still exists at this slot.
                 if slot_refs.is_none() {
                     // We also expect that the accounts index must contain an
                     // entry for `pubkey`. Log a warning for now. In future,

From fd0607bf082ab835ac6816d2ede870fbad1885dc Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Tue, 3 Sep 2024 15:59:17 +0000
Subject: [PATCH 03/14] rewrite shrink_ancient_fail_ref test

---
 accounts-db/src/ancient_append_vecs.rs | 47 ++++++++++++++++++--------
 1 file changed, 33 insertions(+), 14 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 0729de3b824fca..fa029ff15ce163 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -1183,6 +1183,7 @@ pub mod tests {
         accounts_file::StorageAccess,
         accounts_hash::AccountHash,
         accounts_index::UpsertReclaim,
+        accounts_index::{AccountsIndexScanResult, ScanFilter},
         append_vec::{
             aligned_stored_size, AppendVec, AppendVecStoredAccountMeta,
             MAXIMUM_APPEND_VEC_FILE_SIZE,
@@ -3812,7 +3813,7 @@ pub mod tests {
     }
 
     #[test]
-    fn test_addref_accounts_failed_to_shrink_ancient() {
+    fn test_shrink_ancient_expected_unref() {
         let db = AccountsDb::new_single_for_tests();
         let empty_account = AccountSharedData::default();
         for count in 0..3 {
             let unrefed_pubkeys = (0..count)
                 .map(|_| solana_sdk::pubkey::new_rand())
                 .collect::>();
             // how many of `many_ref_accounts` should be found in the index with ref_count=1
-            let mut expected_ref_counts = HashMap::::default();
+            let mut expected_ref_counts_before_unref = HashMap::::default();
+            let mut expected_ref_counts_after_unref = HashMap::::default();
 
             unrefed_pubkeys.iter().for_each(|k| {
                 for slot in 0..2 {
                     db.accounts_index.upsert(
                         UpsertReclaim::IgnoreReclaims,
                     );
                 }
-                // set to 2 initially, made part of `unrefed_pubkeys`, expect it to be addref'd to 3
-                expected_ref_counts.insert(*k, 3);
+                expected_ref_counts_before_unref.insert(*k, 2);
+                expected_ref_counts_after_unref.insert(*k, 1);
             });
 
             let shrink_collect = ShrinkCollect:: {
                 total_starting_accounts: 0,
                 all_are_zero_lamports: false,
             };
-            let accounts_to_combine = AccountsToCombine {
-                accounts_keep_slots: HashMap::default(),
-                accounts_to_combine: vec![shrink_collect],
-                target_slots_sorted: Vec::default(),
-                unpackable_slots_count: 0,
-            };
-            db.addref_accounts_failed_to_shrink_ancient(accounts_to_combine.accounts_to_combine);
+
+            // Assert ref_counts before unref.
+            db.accounts_index.scan(
+                shrink_collect.unrefed_pubkeys.iter().cloned(),
+                |k, slot_refs, _entry| {
+                    assert_eq!(
+                        expected_ref_counts_before_unref.remove(k).unwrap(),
+                        slot_refs.unwrap().1
+                    );
+                    AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
+                },
+                None,
+                false,
+                ScanFilter::All,
+            );
+            assert!(expected_ref_counts_before_unref.is_empty());
+
+            // unref ref_counts
+            db.unref_shrunk_dead_accounts(shrink_collect.unrefed_pubkeys.iter().cloned(), 0);
+
+            // Assert ref_counts after unref
             db.accounts_index.scan(
-                unrefed_pubkeys.iter(),
+                shrink_collect.unrefed_pubkeys.iter().cloned(),
                 |k, slot_refs, _entry| {
-                    assert_eq!(expected_ref_counts.remove(k).unwrap(), slot_refs.unwrap().1);
+                    assert_eq!(
+                        expected_ref_counts_after_unref.remove(k).unwrap(),
+                        slot_refs.unwrap().1
+                    );
                     AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
                 },
                 None,
@@ -3875,7 +3894,7 @@ pub mod tests {
                 ScanFilter::All,
             );
             // should have removed all of them
-            assert!(expected_ref_counts.is_empty());
+            assert!(expected_ref_counts_after_unref.is_empty());
         }
     }

From cde8556c44fb4055e7aac6ff3eb64a5cb551a0d3 Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Tue, 3 Sep 2024 16:58:47 +0000
Subject: [PATCH 04/14] fix comments

---
 accounts-db/src/ancient_append_vecs.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index fa029ff15ce163..1c63a7ba79d4ff 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -737,8 +737,15 @@ impl AccountsDb {
         let mut target_slots_sorted = Vec::with_capacity(len);
 
         // `shrink_collect` all accounts in the append vecs we want to combine.
-        // This also unrefs all dead accounts in those append vecs.
-        // This needs to serially iterate largest to smallest slot so that we unref older dead slots after we have visited the newer alive slots.
+        // The following comments are no longer correct?
+        // --This also unrefs all dead accounts in those append vecs.
+        // --This needs to serially iterate largest to smallest slot so that we unref older dead slots after we have visited the newer alive slots.
+        // We are no longer doing eager unref in shrink_collect. Therefore, we will no longer need to iter them serially?
+        // There is a subtle difference, which can lead to having more multi-refs than before?
+        // Consider account X in both slot x, and x+1 and x+2.
+        // With eager unref, we will only collect `one_ref`` X at slot x+2 after shrink.
+        // While without eager unref, we will collect X at `multi-ref` after shrink.
+        // Packing multi-ref is less efficient than `one_ref``. But it might be ok - next round of clean, hopefully can turn this multi-ref into one-ref.
         let mut accounts_to_combine = accounts_per_storage
             .iter()
             .map(|(info, unique_accounts)| {

From 56e2d776de878bed8243565169b26bc9e429a3cb Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Tue, 3 Sep 2024 16:59:18 +0000
Subject: [PATCH 05/14] fix a test

---
 accounts-db/src/ancient_append_vecs.rs | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 1c63a7ba79d4ff..f87d3378ffa504 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -1752,13 +1752,7 @@ pub mod tests {
                         &tuning,
                         many_ref_slots,
                     );
-                    let mut expected_accounts_to_combine = num_slots;
-                    if two_refs && many_ref_slots == IncludeManyRefSlots::Skip && num_slots > 2
-                    {
-                        // We require more than 1 target slot. Since all slots have multi refs, we find no slots we can use as target slots.
-                        // Thus, nothing can be packed.
-                        expected_accounts_to_combine = 0;
-                    }
+                    let expected_accounts_to_combine = num_slots;
                     (0..accounts_to_combine
                         .target_slots_sorted
                         .len()

From be33d5d4334cf770d7fa55d614d8e78f62c099de Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Tue, 3 Sep 2024 17:06:13 +0000
Subject: [PATCH 06/14] fix another test

---
 accounts-db/src/ancient_append_vecs.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index f87d3378ffa504..ce87a58ac7d227 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -741,11 +741,11 @@ impl AccountsDb {
         // --This also unrefs all dead accounts in those append vecs.
         // --This needs to serially iterate largest to smallest slot so that we unref older dead slots after we have visited the newer alive slots.
         // We are no longer doing eager unref in shrink_collect. Therefore, we will no longer need to iter them serially?
-        // There is a subtle difference, which can lead to having more multi-refs than before?
+        // There is a subtle difference for zero lamport accounts, which can lead to having more multi-refs than before?
         // Consider account X in both slot x, and x+1 and x+2.
         // With eager unref, we will only collect `one_ref`` X at slot x+2 after shrink.
         // While without eager unref, we will collect X at `multi-ref` after shrink.
-        // Packing multi-ref is less efficient than `one_ref``. But it might be ok - next round of clean, hopefully can turn this multi-ref into one-ref.
+        // Packing multi-ref is less efficient than `one_ref``. But it might be ok - in next round of clean, hopefully, it can turn this from multi-ref into one-ref.
         let mut accounts_to_combine = accounts_per_storage
             .iter()
             .map(|(info, unique_accounts)| {
@@ -1855,7 +1855,7 @@ pub mod tests {
                     assert_eq!(
                         accounts_to_combine.accounts_to_combine.len(),
                         // if we are only trying to pack a single slot of multi-refs, it will succeed
-                        if !two_refs || many_ref_slots == IncludeManyRefSlots::Include || num_slots == 1 {num_slots} else {0},
+                        if !two_refs || many_ref_slots == IncludeManyRefSlots::Include || num_slots == 1 || num_slots == 2 {num_slots} else {0},
                         "method: {method:?}, num_slots: {num_slots}, two_refs: {two_refs}, many_refs: {many_ref_slots:?}"
                     );

From 87496cab5d382cb6698533e9f3323e6901be608e Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Tue, 3 Sep 2024 19:49:28 +0000
Subject: [PATCH 07/14] fmt

---
 accounts-db/src/ancient_append_vecs.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index ce87a58ac7d227..f97fa3ee70cfc8 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -1189,8 +1189,7 @@ pub mod tests {
         },
         accounts_file::StorageAccess,
         accounts_hash::AccountHash,
-        accounts_index::UpsertReclaim,
-        accounts_index::{AccountsIndexScanResult, ScanFilter},
+        accounts_index::{AccountsIndexScanResult, ScanFilter, UpsertReclaim},
         append_vec::{
             aligned_stored_size, AppendVec, AppendVecStoredAccountMeta,
             MAXIMUM_APPEND_VEC_FILE_SIZE,

From 56e6a0f603818780a93dec0391036486b5a45b80 Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 13:16:14 +0000
Subject: [PATCH 08/14] del invalid comments

---
 accounts-db/src/ancient_append_vecs.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index f97fa3ee70cfc8..76b38d6affa058 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -737,14 +737,11 @@ impl AccountsDb {
         let mut target_slots_sorted = Vec::with_capacity(len);
 
         // `shrink_collect` all accounts in the append vecs we want to combine.
-        // The following comments are no longer correct?
-        // --This also unrefs all dead accounts in those append vecs.
-        // --This needs to serially iterate largest to smallest slot so that we unref older dead slots after we have visited the newer alive slots.
         // We are no longer doing eager unref in shrink_collect. Therefore, we will no longer need to iter them serially?
         // There is a subtle difference for zero lamport accounts, which can lead to having more multi-refs than before?
        // Consider account X in both slot x, and x+1 and x+2.
         // With eager unref, we will only collect `one_ref`` X at slot x+2 after shrink.
-        // While without eager unref, we will collect X at `multi-ref` after shrink.
+        // Without eager unref, we will collect X at `multi-ref` after shrink.
         // Packing multi-ref is less efficient than `one_ref``. But it might be ok - in next round of clean, hopefully, it can turn this from multi-ref into one-ref.

From f8aefe04c7ded58c9aaaf1fcd733af50598f8198 Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 13:24:21 +0000
Subject: [PATCH 09/14] reviews: move log to new PR.

---
 accounts-db/src/accounts_db.rs | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 060cb31739e793..d20843bbefe1e4 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -4367,16 +4367,6 @@ impl AccountsDb {
                 // clean needs to take care of this dead slot
                 self.accounts_index.add_uncleaned_roots([slot]);
             }
-            if !shrink_collect.all_are_zero_lamports {
-                // if all are zero lamports, then we expect that we would like to mark the whole slot dead, but we cannot. That's clean's job.
-                info!(
-                    "Unexpected shrink for slot {} alive {} capacity {}, \
-                    likely caused by a bug for calculating alive bytes. All alive bytes are zero: {}, {}",
-                    slot, shrink_collect.alive_total_bytes, shrink_collect.capacity,
-                    store.alive_bytes(), shrink_collect.zero_lamport_single_ref_pubkeys.len() * aligned_stored_size(0),
-                );
-            }
-
             self.shrink_stats
                 .skipped_shrink
                 .fetch_add(1, Ordering::Relaxed);

From 3e2380643c3499d0e9b77374ba57f7c81023164f Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 13:55:27 +0000
Subject: [PATCH 10/14] Revert "reviews: move log to new PR."

This reverts commit f8aefe04c7ded58c9aaaf1fcd733af50598f8198.
---
 accounts-db/src/accounts_db.rs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index d20843bbefe1e4..060cb31739e793 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -4367,6 +4367,16 @@ impl AccountsDb {
                 // clean needs to take care of this dead slot
                 self.accounts_index.add_uncleaned_roots([slot]);
             }
+            if !shrink_collect.all_are_zero_lamports {
+                // if all are zero lamports, then we expect that we would like to mark the whole slot dead, but we cannot. That's clean's job.
+                info!(
+                    "Unexpected shrink for slot {} alive {} capacity {}, \
+                    likely caused by a bug for calculating alive bytes. All alive bytes are zero: {}, {}",
+                    slot, shrink_collect.alive_total_bytes, shrink_collect.capacity,
+                    store.alive_bytes(), shrink_collect.zero_lamport_single_ref_pubkeys.len() * aligned_stored_size(0),
+                );
+            }
+
             self.shrink_stats
                 .skipped_shrink
                 .fetch_add(1, Ordering::Relaxed);

From fb413ba7fd66c483f4fd0576f3cda88a73794169 Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 13:58:31 +0000
Subject: [PATCH 11/14] fix comments

---
 accounts-db/src/accounts_db.rs | 10 +++------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 060cb31739e793..2c01576a8dec8f 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -4309,13 +4309,12 @@ impl AccountsDb {
 
     pub(crate) fn unref_shrunk_dead_accounts<'a>(
         &self,
-        unrefed_pubkeys: impl Iterator,
+        pubkeys: impl Iterator,
         slot: Slot,
     ) {
         self.accounts_index.scan(
-            unrefed_pubkeys,
+            pubkeys,
             |pubkey, slot_refs, _entry| {
-                // pubkeys in `unrefed_pubkeys` were unref'd in `shrink_collect` above under the assumption that we would shrink everything.
                 if slot_refs.is_none() {
                     // We also expect that the accounts index must contain an
                     // entry for `pubkey`. Log a warning for now. In future,
@@ -4367,15 +4366,12 @@ impl AccountsDb {
                 // clean needs to take care of this dead slot
                 self.accounts_index.add_uncleaned_roots([slot]);
             }
-            if !shrink_collect.all_are_zero_lamports {
-                // if all are zero lamports, then we expect that we would like to mark the whole slot dead, but we cannot. That's clean's job.
-                info!(
                    "Unexpected shrink for slot {} alive {} capacity {}, \
                    likely caused by a bug for calculating alive bytes. All alive bytes are zero: {}, {}",
                    slot, shrink_collect.alive_total_bytes, shrink_collect.capacity,
                    store.alive_bytes(), shrink_collect.zero_lamport_single_ref_pubkeys.len() * aligned_stored_size(0),
                );
-            }
+            info!(
                "Unexpected shrink for slot {} alive {} capacity {}, \
                likely caused by a bug for calculating alive bytes. All alive bytes are zero: {}, {}",
                slot, shrink_collect.alive_total_bytes, shrink_collect.capacity,
                store.alive_bytes(), shrink_collect.zero_lamport_single_ref_pubkeys.len() * aligned_stored_size(0),
            );

             self.shrink_stats
                 .skipped_shrink
                 .fetch_add(1, Ordering::Relaxed);

From a5ec41ac4d3a2631cfde481a328e3b2aba6828ba Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 13:59:47 +0000
Subject: [PATCH 12/14] revert log content

---
 accounts-db/src/accounts_db.rs | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 2c01576a8dec8f..59a88f194798df 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -4367,11 +4367,10 @@ impl AccountsDb {
                 self.accounts_index.add_uncleaned_roots([slot]);
             }
             info!(
-                "Unexpected shrink for slot {} alive {} capacity {}, \
-                likely caused by a bug for calculating alive bytes. All alive bytes are zero: {}, {}",
-                slot, shrink_collect.alive_total_bytes, shrink_collect.capacity,
-                store.alive_bytes(), shrink_collect.zero_lamport_single_ref_pubkeys.len() * aligned_stored_size(0),
-            );
+                "Unexpected shrink for slot {} alive {} capacity {}, \
+                likely caused by a bug for calculating alive bytes.",
+                slot, shrink_collect.alive_total_bytes, shrink_collect.capacity
+            );

             self.shrink_stats
                 .skipped_shrink
                 .fetch_add(1, Ordering::Relaxed);

From 970c768bfdc9e9542a32b9d582d2916b6ea4ac21 Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 14:25:56 +0000
Subject: [PATCH 13/14] pr: rename

---
 accounts-db/src/accounts_db.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 59a88f194798df..5c058589bd3489 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -3954,7 +3954,7 @@ impl AccountsDb {
     ) -> LoadAccountsIndexForShrink<'a, T> {
         let count = accounts.len();
         let mut alive_accounts = T::with_capacity(count, slot_to_shrink);
-        let mut unrefed_pubkeys = Vec::with_capacity(count);
+        let mut pubkeys_to_unref = Vec::with_capacity(count);
         let mut zero_lamport_single_ref_pubkeys = Vec::with_capacity(count);
 
         let mut alive = 0;
@@ -4002,7 +4002,7 @@ impl AccountsDb {
                         // It would have had a ref to the storage from the initial store, but it will
                         // not exist in the re-written slot. Unref it to keep the index consistent with
                         // rewriting the storage entries.
-                        unrefed_pubkeys.push(pubkey);
+                        pubkeys_to_unref.push(pubkey);
                         dead += 1;
                     } else {
                         do_populate_accounts_for_shrink(ref_count, slot_list);
@@ -4037,7 +4037,7 @@ impl AccountsDb {
         LoadAccountsIndexForShrink {
             alive_accounts,
-            unrefed_pubkeys,
+            unrefed_pubkeys: pubkeys_to_unref,
             zero_lamport_single_ref_pubkeys,
             all_are_zero_lamports,
         }

From 15ff149d9dc04044159b397270a2eecc026f2dab Mon Sep 17 00:00:00 2001
From: HaoranYi
Date: Wed, 4 Sep 2024 14:39:40 +0000
Subject: [PATCH 14/14] pr: more rename

---
 accounts-db/src/accounts_db.rs | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 5c058589bd3489..051ec76d832007 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -524,8 +524,9 @@ pub type BinnedHashData = Vec>;
 struct LoadAccountsIndexForShrink<'a, T: ShrinkCollectRefs<'a>> {
     /// all alive accounts
     alive_accounts: T,
-    /// pubkeys that were unref'd in the accounts index because they were dead
-    unrefed_pubkeys: Vec<&'a Pubkey>,
+    /// pubkeys that are going to be unref'd in the accounts index after we are
+    /// done with shrinking, because they are dead
+    pubkeys_to_unref: Vec<&'a Pubkey>,
     /// pubkeys that are the last remaining zero lamport instance of an account
     zero_lamport_single_ref_pubkeys: Vec<&'a Pubkey>,
     /// true if all alive accounts are zero lamport accounts
@@ -4037,7 +4038,7 @@ impl AccountsDb {
         LoadAccountsIndexForShrink {
             alive_accounts,
-            unrefed_pubkeys: pubkeys_to_unref,
+            pubkeys_to_unref,
             zero_lamport_single_ref_pubkeys,
             all_are_zero_lamports,
         }
@@ -4157,7 +4158,7 @@ impl AccountsDb {
             .for_each(|stored_accounts| {
                 let LoadAccountsIndexForShrink {
                     alive_accounts,
-                    mut unrefed_pubkeys,
+                    pubkeys_to_unref: mut unrefed_pubkeys,
                     all_are_zero_lamports,
                     mut zero_lamport_single_ref_pubkeys,
                 } = self.load_accounts_index_for_shrink(stored_accounts, stats, slot);