Skip to content

Commit

Permalink
hookup LTHashCache
Browse files Browse the repository at this point in the history
  • Loading branch information
HaoranYi committed Mar 11, 2024
1 parent a9b5e5a commit e2082c4
Show file tree
Hide file tree
Showing 3 changed files with 46 additions and 16 deletions.
49 changes: 38 additions & 11 deletions accounts-db/src/accounts_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2387,6 +2387,22 @@ pub struct PubkeyHashAccount {
pub type LTHashCacheMap =
RwLock<HashMap<Pubkey, (Option<AccountSharedData>, Option<AccountLTHash>)>>;

/// Statistics gathered during one LT-hash accumulation pass over a slot's
/// written accounts.
///
/// Returned by `AccountsDb::accumulate_accounts_lt_hash` so the caller can
/// log cache effectiveness (via the `Display` impl).
#[derive(Debug, Default, Clone, Copy)]
pub struct LTHashCacheStat {
    /// Number of pubkeys whose old lt_hash was served from the
    /// `written_accounts_before` cache, avoiding an account load.
    hits: u64,
    /// Entry count of the `written_accounts_before` cache map.
    before_size: u64,
    /// Entry count of the `written_accounts_after` cache map.
    after_size: u64,
}

impl std::fmt::Display for LTHashCacheStat {
    /// Single-line, human-readable rendering of the cache counters,
    /// suitable for inclusion in a log message.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let Self {
            hits,
            before_size,
            after_size,
        } = self;
        write!(
            f,
            "LTHashCacheStat {{ hits: {}, before_size: {}, after_size: {} }}",
            hits, before_size, after_size
        )
    }
}

#[derive(Debug)]
pub struct AccountsDeltaHashCalculationOutput {
/// `delta_hash` computed from all the accounts that have been written to.
Expand Down Expand Up @@ -8125,30 +8141,34 @@ impl AccountsDb {
accumulated_accounts_hash: &mut AccountLTHash,
pubkeys: &[Pubkey],
written_accounts_before: &LTHashCacheMap,
_written_accounts_after: &LTHashCacheMap,
) {
written_accounts_after: &LTHashCacheMap,
) -> LTHashCacheStat {
// We are assuming it was easy to lookup a hash for everything written
// in `slot` when we were calculating the delta hash. So, caller passes
// in `pubkey_hash`. Note we don't need rewrites in `pubkey_hash`. these
// accounts had the same hash before and after. So, we only have to
// consider what was written that changed.
let mut old_ancestors = ancestors.clone();
old_ancestors.remove(&slot);
let _written_accounts_before = written_accounts_before.read().unwrap();
let mut cache_hits: u64 = 0;
let r_written_accounts_before = written_accounts_before.read().unwrap();
// if we want to look it up ourselves: let (hashes, _scan_us, _accumulate) = self.get_pubkey_hash_for_slot(slot);
let old = pubkeys
.iter()
.map(|k| {
// TODO: add optimization to cache lt_hash from last slot
//if let Some((account, hash)) = written_accounts_before.get(k) {
// Some(hash.unwrap()) // todo on demand calculate, calculate in bg
//} else {
self.load_with_fixed_root(&old_ancestors, k)
.map(|(account, _)| Self::lt_hash_account(&account, k))
//}
if let Some((_account, hash)) = r_written_accounts_before.get(k) {
// Get old `lt_hash` from cache of writes in parent slots
cache_hits += 1;
Some(hash.unwrap()) // TODO on demand calculate, calculate in bg
} else {
self.load_with_fixed_root(&old_ancestors, k)
.map(|(account, _)| Self::lt_hash_account(&account, k))
}
})
.collect::<Vec<_>>();
drop(r_written_accounts_before);

let mut w_written_accounts_after = written_accounts_after.write().unwrap();
pubkeys.iter().zip(old).for_each(|(k, old_hash)| {
if let Some(old) = old_hash {
// todo if old == new, then we can avoid this update altogether
Expand All @@ -8159,9 +8179,16 @@ impl AccountsDb {
.map(|(account, _)| Self::lt_hash_account(&account, k));
if let Some(new) = new {
accumulated_accounts_hash.add(&new);
//written_accounts_after.write().unwrap.insert(k, new);
w_written_accounts_after.insert(*k, (None, Some(new)));
}
});
drop(w_written_accounts_after);

LTHashCacheStat {
hits: cache_hits,
before_size: written_accounts_before.read().unwrap().len() as u64,
after_size: written_accounts_after.read().unwrap().len() as u64,
}
}

/// Calculate accounts delta hash for `slot`
Expand Down
12 changes: 7 additions & 5 deletions runtime/src/bank.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ use {
accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot},
accounts_db::{
AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig,
AccountsDeltaHashCalculationOutput, CalcAccountsHashDataSource,
AccountsDeltaHashCalculationOutput, CalcAccountsHashDataSource, LTHashCacheStat,
VerifyAccountsHashAndLamportsConfig,
},
accounts_hash::{
Expand Down Expand Up @@ -6349,14 +6349,15 @@ impl Bank {
delta_hash_timer.stop();

let mut lt_hash_timer = Measure::start("lt_hash_compute");
self.calculate_account_lt_hash(pubkeys);
let lt_hash_cache_stat = self.calculate_account_lt_hash(pubkeys);
lt_hash_timer.stop();

info!(
"slot_hash_time {} {}us {}us",
"slot_hash_time slot={} delta_hash={}us lt_hash={}us {}",
slot,
delta_hash_timer.as_us(),
lt_hash_timer.as_us(),
lt_hash_cache_stat
);

datapoint_info!(
Expand Down Expand Up @@ -6415,7 +6416,7 @@ impl Bank {
hash
}

fn calculate_account_lt_hash(&self, pubkeys: Vec<Pubkey>) {
fn calculate_account_lt_hash(&self, pubkeys: Vec<Pubkey>) -> LTHashCacheStat {
// // todo: note that this could be slow. My theory was we'd want to run this in the bg and have it ready later.
// // It could be ready for the snapshot, accounts hash abs loop, publishing to gossip later, or even to include in the bank hash NEXT slot.
// // having it ready THIS slot is a lot of work and adds loads into the dependency chain or makes every load more expensive (especially considering an account written multiple times in the same slot).
Expand Down Expand Up @@ -6446,7 +6447,7 @@ impl Bank {
.unwrap_or_default()
})
.unwrap_or_default();
self.rc.accounts.accounts_db.accumulate_accounts_lt_hash(
let lt_hash_cache_stat = self.rc.accounts.accounts_db.accumulate_accounts_lt_hash(
slot,
&self.ancestors,
&mut accumulated,
Expand All @@ -6456,6 +6457,7 @@ impl Bank {
);

*self.accumulated_accounts_hash.write().unwrap() = Some(accumulated);
lt_hash_cache_stat
}

/// The epoch accounts hash is hashed into the bank's hash once per epoch at a predefined slot.
Expand Down
1 change: 1 addition & 0 deletions runtime/src/bank/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13817,6 +13817,7 @@ fn test_failed_simulation_compute_units() {

#[test]
fn test_lt_hash_on_banks() {
solana_logger::setup();
let (genesis_config, _mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL);

// Helper fn to compute full lattice hash from accounts db.
Expand Down

0 comments on commit e2082c4

Please sign in to comment.