From b4f698a05f8ce6dd55c0f7ce224625f5248e1d86 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Mon, 18 Dec 2023 17:27:29 -0600 Subject: [PATCH] wip: lattic hash --- accounts-db/benches/accounts.rs | 32 +- accounts-db/src/accounts_cache.rs | 1 + accounts-db/src/accounts_db.rs | 489 +++++++++++++++++++++++++---- accounts-db/src/accounts_hash.rs | 493 +++++++++++++++++++++++++++++- ledger-tool/src/args.rs | 2 + ledger-tool/src/main.rs | 14 + runtime/src/bank.rs | 108 ++++++- runtime/src/bank/tests.rs | 105 +++++++ runtime/src/lib.rs | 1 + runtime/src/lthash_cache.rs | 64 ++++ svm/src/account_loader.rs | 8 + svm/src/transaction_processor.rs | 2 + svm/tests/mock_bank.rs | 2 + validator/src/cli.rs | 7 + validator/src/main.rs | 2 + 15 files changed, 1263 insertions(+), 67 deletions(-) create mode 100644 runtime/src/lthash_cache.rs diff --git a/accounts-db/benches/accounts.rs b/accounts-db/benches/accounts.rs index 9b3b70600a60a2..a8a2429468537b 100644 --- a/accounts-db/benches/accounts.rs +++ b/accounts-db/benches/accounts.rs @@ -10,7 +10,7 @@ use { solana_accounts_db::{ accounts::{AccountAddressFilter, Accounts}, accounts_db::{ - test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb, + test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb, LTHashCacheMap, VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, }, accounts_index::{AccountSecondaryIndexes, ScanConfig}, @@ -26,6 +26,7 @@ use { }, std::{ collections::{HashMap, HashSet}, + hint::black_box, path::PathBuf, sync::{Arc, RwLock}, thread::Builder, @@ -341,3 +342,32 @@ fn bench_load_largest_accounts(b: &mut Bencher) { ) }); } + +/// Bench for computing lattice hash +/// test test_accounts_lt_hash ... bench: 5,420,262 ns/iter (+/- 60,676) -- seq +/// test test_accounts_lt_hash ... bench: 2,857,909 ns/iter (+/- 30,022) -- par_chunks (500) +/// test test_accounts_lt_hash ... bench: 2,476,487 ns/iter (+/- 55,287) -- par_chunks (250) +/// test test_accounts_lt_hash ... 
bench: 2,415,198 ns/iter (+/- 64,917) -- enum cache value +/// test test_accounts_lt_hash ... bench: 1,783,466 ns/iter (+/- 210,289) -- boxing AccountLTHash +/// test test_accounts_lt_hash ... bench: 1,631,536 ns/iter (+/- 310,180) -- boxing AccountSharedData +#[bench] +fn test_accounts_lt_hash(bencher: &mut Bencher) { + solana_logger::setup(); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_lt_hash")]); + let accounts = Accounts::new(Arc::new(accounts_db)); + let mut pubkeys: Vec = vec![]; + create_test_accounts(&accounts, &mut pubkeys, 1_000, 0); + + let mut ancestor = Ancestors::default(); + ancestor.insert(0, 1); + + bencher.iter(|| { + black_box(accounts.accounts_db.accumulate_accounts_lt_hash( + 0, + &ancestor, + &RwLock::new(Some(AccountLTHash::default())), + <HashCacheMap::default(), + <HashCacheMap::default(), + )) + }); +} diff --git a/accounts-db/src/accounts_cache.rs b/accounts-db/src/accounts_cache.rs index e612ad741e84cc..89133ccd8eda1a 100644 --- a/accounts-db/src/accounts_cache.rs +++ b/accounts-db/src/accounts_cache.rs @@ -134,6 +134,7 @@ pub type CachedAccount = Arc; pub struct CachedAccountInner { pub account: AccountSharedData, hash: SeqLock>, + // TODO: add lt_hash pubkey: Pubkey, } diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 7f29edec19c949..3ee58d898c1cae 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -35,10 +35,10 @@ use { AccountsFile, AccountsFileError, MatchAccountOwnerError, ALIGN_BOUNDARY_OFFSET, }, accounts_hash::{ - AccountHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, AccountsHasher, - CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, IncrementalAccountsHash, - SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, - ZeroLamportAccounts, + AccountHash, AccountLTHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, + AccountsHasher, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, + 
IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash, + SerdeIncrementalAccountsHash, ZeroLamportAccounts, }, accounts_index::{ in_mem_accounts_index::StartupStats, AccountMapEntry, AccountSecondaryIndexes, @@ -500,6 +500,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { create_ancient_storage: CreateAncientStorage::Pack, test_partitioned_epoch_rewards: TestPartitionedEpochRewards::CompareResults, test_skip_rewrites_but_include_in_bank_hash: false, + enable_accumulate_account_hash_calculation: false, }; pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), @@ -513,6 +514,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig create_ancient_storage: CreateAncientStorage::Pack, test_partitioned_epoch_rewards: TestPartitionedEpochRewards::None, test_skip_rewrites_but_include_in_bank_hash: false, + enable_accumulate_account_hash_calculation: false, }; pub type BinnedHashData = Vec>; @@ -558,6 +560,7 @@ pub struct AccountsDbConfig { /// how to create ancient storages pub create_ancient_storage: CreateAncientStorage, pub test_partitioned_epoch_rewards: TestPartitionedEpochRewards, + pub enable_accumulate_account_hash_calculation: bool, } #[cfg(not(test))] @@ -915,6 +918,17 @@ impl<'a> LoadedAccount<'a> { } } + pub fn compute_lt_hash(&self, pubkey: &Pubkey) -> AccountLTHash { + match self { + LoadedAccount::Stored(stored_account_meta) => { + AccountsDb::lt_hash_account(stored_account_meta, stored_account_meta.pubkey()) + } + LoadedAccount::Cached(cached_account) => { + AccountsDb::lt_hash_account(&cached_account.account, pubkey) + } + } + } + pub fn take_account(self) -> AccountSharedData { match self { LoadedAccount::Stored(stored_account_meta) => { @@ -1289,6 +1303,8 @@ pub struct AccountsDb { /// true if this client should skip rewrites but still include those rewrites in the bank hash as if rewrites had 
occurred. pub test_skip_rewrites_but_include_in_bank_hash: bool, + pub enable_accumulate_account_hash_calculation: bool, + pub accounts_cache: AccountsCache, write_cache_limit_bytes: Option, @@ -2278,6 +2294,72 @@ pub struct PubkeyHashAccount { pub account: AccountSharedData, } +#[derive(Debug, Eq, PartialEq)] +pub enum LTHashCacheValue { + /// AccountLTHash is 2048 byte, while AccountSharedData is only 64 bytes. If + /// we are holding AccountLTHash directly, this would result the enum to be + /// 2056 bytes. + /// + /// From benchmark, i.e. `test_accounts_lt_hash`, it shows that boxing both + /// variants improves cache performance. + Hash(Box), + Account(Box), +} + +pub type LTHashCacheMap = RwLock>; + +pub struct LTHashCacheStat { + hits: u64, + load_old_accounts: u64, + skip_load_old_accounts: u64, + before_size: u64, + pub after_size: u64, +} + +impl std::fmt::Display for LTHashCacheStat { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "LTHashCacheStat {{ hits: {}, load_old_accounts: {}, \ + skip_load_old_accounts: {} before_size: {}, after_size: {} }}", + self.hits, + self.load_old_accounts, + self.skip_load_old_accounts, + self.before_size, + self.after_size + ) + } +} + +#[derive(Debug)] +pub struct AccountsDeltaHashCalculationOutput { + /// `delta_hash` computed from all the accounts that have been written to. + pub delta_hash: AccountsDeltaHash, + + /// Contains the accounts that have been written to. + /// + /// Note that `accounts` collected in the vec may be in arbitrary order as a + /// result of parallel scanning in accounts_db. + pub accounts: Vec, +} + +#[cfg(feature = "dev-context-only-utils")] +impl PartialEq for AccountsDeltaHashCalculationOutput { + fn eq(&self, other: &Self) -> bool { + if self.delta_hash != other.delta_hash { + return false; + } + + // The order of `accounts` doesn't matter for equality comparison. + // Convert into HashSet then compare. 
+ return self.accounts.iter().copied().collect::>() + == other.accounts.iter().copied().collect::>(); + } +} + +#[cfg(feature = "dev-context-only-utils")] +impl Eq for AccountsDeltaHashCalculationOutput {} + impl AccountsDb { pub const DEFAULT_ACCOUNTS_HASH_CACHE_DIR: &'static str = "accounts_hash_cache"; @@ -2380,6 +2462,7 @@ impl AccountsDb { partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), test_skip_rewrites_but_include_in_bank_hash: false, + enable_accumulate_account_hash_calculation: false, } } @@ -2448,6 +2531,11 @@ impl AccountsDb { .map(|config| config.test_skip_rewrites_but_include_in_bank_hash) .unwrap_or_default(); + let enable_accumulate_account_hash_calculation = accounts_db_config + .as_ref() + .map(|config| config.enable_accumulate_account_hash_calculation) + .unwrap_or_default(); + let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig = PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards); @@ -2467,6 +2555,7 @@ impl AccountsDb { partitioned_epoch_rewards_config, exhaustively_verify_refcounts, test_skip_rewrites_but_include_in_bank_hash, + enable_accumulate_account_hash_calculation, ..Self::default_with_accounts_index( accounts_index, base_working_path, @@ -5897,17 +5986,14 @@ impl AccountsDb { ) } - fn hash_account_data( + fn hash_account_data_internal( lamports: u64, owner: &Pubkey, executable: bool, rent_epoch: Epoch, data: &[u8], pubkey: &Pubkey, - ) -> AccountHash { - if lamports == 0 { - return AccountHash(Hash::default()); - } + ) -> blake3::Hasher { let mut hasher = blake3::Hasher::new(); // allocate a buffer on the stack that's big enough @@ -5939,9 +6025,52 @@ impl AccountsDb { buffer.extend_from_slice(pubkey.as_ref()); hasher.update(&buffer); + hasher + } + + fn hash_account_data( + lamports: u64, + owner: &Pubkey, + executable: bool, + rent_epoch: Epoch, + data: &[u8], + pubkey: &Pubkey, + ) -> AccountHash { + if 
lamports == 0 { + return AccountHash::default(); + } + let hasher = + Self::hash_account_data_internal(lamports, owner, executable, rent_epoch, data, pubkey); AccountHash(Hash::new_from_array(hasher.finalize().into())) } + pub fn lt_hash_account(account: &T, pubkey: &Pubkey) -> AccountLTHash { + Self::lt_hash_account_data( + account.lamports(), + account.owner(), + account.executable(), + account.rent_epoch(), + account.data(), + pubkey, + ) + } + + fn lt_hash_account_data( + lamports: u64, + owner: &Pubkey, + executable: bool, + rent_epoch: Epoch, + data: &[u8], + pubkey: &Pubkey, + ) -> AccountLTHash { + if lamports == 0 { + return AccountLTHash::default(); + } + let hasher = + Self::hash_account_data_internal(lamports, owner, executable, rent_epoch, data, pubkey); + AccountLTHash::new_from_reader(hasher.finalize_xof()) + } + fn bulk_assign_write_version(&self, count: usize) -> StoredMetaWriteVersion { self.write_version .fetch_add(count as StoredMetaWriteVersion, Ordering::AcqRel) @@ -6615,6 +6744,136 @@ impl AccountsDb { AccountsHasher::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::()) } + /// # Accounts Hash Q/A + /// + /// ## How many places do we calculate accounts hash? + /// There are several places that we calculate accounts hash. + /// 1. when bank freeze, a hash of all the accounts that changed during the + /// slot is computed. And this hash is used for voting. + /// 2. when we take snapshot (incremental or full), a hash of all the + /// accounts up to the snapshot slot is computed and is included in the + /// snapshot. + /// 3. when we compute epoch hash, which is the full hash at 25% of the + /// epoch. This hash is included at 75% of the epoch. + /// + /// ## How to calculate accounts hash? + /// Given a set of accounts hashes, ordering them by pubkeys. Then compute a + /// merkle tree with fan-out of 16. Return the root hash as the final + /// account hash. + /// + /// ## Are account hash stored? + /// Yes. 
Account hash is stored in the AppendVec and write cache before + /// flush. + /// + /// ## Where do we compute hash for individual accounts? + /// Good question. We could compute the account hash when we store the + /// account in the accounts db. However, that's in the critical path of + /// execution. Therefore, we don't compute hash right at storing. The + /// computation is done in the background and inserted into the storage + /// later. + /// + /// ## Why are there two ways to compute accounts hash one from index one from storage? + /// Good question. Compute accounts hash from index is only used at start + /// up, when it is guaranteed that no execution is running and changing the + /// index. Because of that this computation, just can the index get the + /// latest hash for the account and send them to do the merkle tree hash. + /// However, for other hash calculation, the execution is running and index + /// will be changing. We can't rely on the index to get the account's hash. + /// Therefore, those computation must be done from the account storage. + /// Without the help of the index, we need to scan the storage to get all + /// accounts. And there might be duplication. Therefore, we have to dedup. + /// And then, merkle them. + /// + /// ## What's account hash cache? + /// Since accounts hash calculation repeat every X slots due to periodical + /// full/incremental snapshots. Scanning the appendvec is expensive. And + /// those appenvec may not change between each hash calculation. Therefore, + /// we introduce the accounts hash cache to store the san result and reuse + /// them if the underlying appendvec didn't change. 
+ pub fn calculate_accounts_lt_hash_from_index( + &self, + ancestors: Option<&Ancestors>, + max_root: Option, + ) -> AccountLTHash { + let mut collect = Measure::start("lt_hash_collect"); + let keys: Vec<_> = self + .accounts_index + .account_maps + .iter() + .flat_map(|map| map.keys()) + .collect(); + collect.stop(); + + let mut scan = Measure::start("lt_hash_scan"); + // Pick a chunk size big enough to allow us to produce output vectors + // that are smaller than the overall size. We'll also accumulate the + // lamports within each chunk and fewer chunks results in less + // contention to accumulate the sum. + let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4); + let total_lamports = Mutex::::new(0); + let total_lt_hash = Mutex::::new(AccountLTHash::default()); + let total_count = Mutex::::new(0); + + let get_hashes = || { + keys.par_chunks(chunks).for_each(|pubkeys| { + let mut local_sum = 0u128; + let mut local_lt_hash_sum = AccountLTHash::default(); + let mut local_count = 0; + pubkeys.iter().for_each(|pubkey| { + if let Some(index_entry) = self.accounts_index.get_cloned(pubkey) { + let _ = self.accounts_index.get_account_info_with_and_then( + &index_entry, + ancestors, + max_root, + |(slot, account_info)| { + if !account_info.is_zero_lamport() { + if let Some(loaded_account) = self + .get_account_accessor( + slot, + pubkey, + &account_info.storage_location(), + ) + .get_loaded_account() + { + let balance = loaded_account.lamports(); + let lt_hash = loaded_account.compute_lt_hash(pubkey); + local_sum += balance as u128; + local_lt_hash_sum.add(<_hash); + local_count += 1; + } + } + }, + ); + } + }); + let mut total = total_lamports.lock().unwrap(); + *total = + AccountsHasher::checked_cast_for_capitalization(*total as u128 + local_sum); + let mut total = total_lt_hash.lock().unwrap(); + total.add(&local_lt_hash_sum); + let mut total = total_count.lock().unwrap(); + *total += local_count; + }) + }; + + get_hashes(); + + scan.stop(); + let total_lamports = 
*total_lamports.lock().unwrap(); + let total_lt_hash = *total_lt_hash.lock().unwrap(); + let total_count = *total_count.lock().unwrap(); + + datapoint_info!( + "calculate_accounts_lt_hash_from_index", + ("accounts_scan", scan.as_us(), i64), + ("collect", collect.as_us(), i64), + ("total_lamports", total_lamports, i64), + ("hash_count", total_count, i64), + ); + + total_lt_hash + } + pub fn calculate_accounts_hash_from_index( &self, max_slot: Slot, @@ -7598,84 +7857,192 @@ impl AccountsDb { Ok(()) } - /// helper to return - /// 1. pubkey, hash pairs for the slot - /// 2. us spent scanning - /// 3. Measure started when we began accumulating - pub fn get_pubkey_hash_for_slot( + /// A generic function to collect `interested` account data from a `slot` + /// into a vector for future processing. + /// + /// Returns the result vector, accounts-db scan time in microseconds, and a + /// start `Measure`` when we began accumulating. + fn collect_from_slot_into_vec( &self, slot: Slot, - ) -> (Vec<(Pubkey, AccountHash)>, u64, Measure) { + extract_cache_value: impl Fn(LoadedAccount) -> Option + Sync, + extract_storage_value: impl Fn(LoadedAccount) -> B + Sync, + flatten_storage_value: impl Fn(Pubkey, B) -> R, + ) -> (Vec, u64, Measure) + where + R: Send, + B: Send + Default + Sync, + { + type ScanResult = ScanStorageResult>; let mut scan = Measure::start("scan"); - let scan_result: ScanStorageResult<(Pubkey, AccountHash), DashMap> = - self.scan_account_storage( - slot, - |loaded_account: LoadedAccount| { - // Cache only has one version per key, don't need to worry about versioning - Some((*loaded_account.pubkey(), loaded_account.loaded_hash())) - }, - |accum: &DashMap, loaded_account: LoadedAccount| { - let loaded_hash = loaded_account.loaded_hash(); - accum.insert(*loaded_account.pubkey(), loaded_hash); - }, - ); + let scan_result: ScanResult = self.scan_account_storage( + slot, + |loaded_account: LoadedAccount| { + // Cache only has one version per key, don't need to worry 
about versioning + extract_cache_value(loaded_account) + }, + |accum: &DashMap, loaded_account: LoadedAccount| { + // Storage may have duplicates so only keep the latest version for each key + accum.insert( + *loaded_account.pubkey(), + extract_storage_value(loaded_account), + ); + }, + ); scan.stop(); let accumulate = Measure::start("accumulate"); - let hashes: Vec<_> = match scan_result { + let result: Vec<_> = match scan_result { ScanStorageResult::Cached(cached_result) => cached_result, - ScanStorageResult::Stored(stored_result) => stored_result.into_iter().collect(), + ScanStorageResult::Stored(stored_result) => stored_result + .into_iter() + .map(|(pubkey, storage_val)| flatten_storage_value(pubkey, storage_val)) + .collect(), }; - (hashes, scan.as_us(), accumulate) + (result, scan.as_us(), accumulate) } - /// Return all of the accounts for a given slot - pub fn get_pubkey_hash_account_for_slot(&self, slot: Slot) -> Vec { - type ScanResult = - ScanStorageResult>; - let scan_result: ScanResult = self.scan_account_storage( + /// helper to return + /// 1. pubkey, hash pairs for the slot + /// 2. us spent scanning + /// 3. 
Measure started when we began accumulating + pub fn get_pubkey_hash_for_slot( + &self, + slot: Slot, + ) -> (Vec<(Pubkey, AccountHash)>, u64, Measure) { + self.collect_from_slot_into_vec( slot, |loaded_account: LoadedAccount| { // Cache only has one version per key, don't need to worry about versioning + Some((*loaded_account.pubkey(), loaded_account.loaded_hash())) + }, + |loaded_account: LoadedAccount| loaded_account.loaded_hash(), + |pubkey, hash| (pubkey, hash), + ) + } + + pub fn get_pubkey_hash_account_for_slot(&self, slot: Slot) -> Vec { + self.collect_from_slot_into_vec( + slot, + |loaded_account: LoadedAccount| { Some(PubkeyHashAccount { pubkey: *loaded_account.pubkey(), hash: loaded_account.loaded_hash(), account: loaded_account.take_account(), }) }, - |accum: &DashMap, - loaded_account: LoadedAccount| { - // Storage may have duplicates so only keep the latest version for each key - accum.insert( - *loaded_account.pubkey(), - (loaded_account.loaded_hash(), loaded_account.take_account()), - ); + |loaded_account: LoadedAccount| { + (loaded_account.loaded_hash(), loaded_account.take_account()) }, - ); + |pubkey, hash_account| PubkeyHashAccount { + pubkey, + hash: hash_account.0, + account: hash_account.1, + }, + ) + .0 + } - match scan_result { - ScanStorageResult::Cached(cached_result) => cached_result, - ScanStorageResult::Stored(stored_result) => stored_result - .into_iter() - .map(|(pubkey, (hash, account))| PubkeyHashAccount { - pubkey, - hash, - account, - }) - .collect(), - } + pub fn get_pubkey_account_for_slot(&self, slot: Slot) -> Vec<(Pubkey, AccountSharedData)> { + self.collect_from_slot_into_vec( + slot, + |loaded_account| Some((*loaded_account.pubkey(), loaded_account.take_account())), + |loaded_account| loaded_account.take_account(), + |pubkey, account| (pubkey, account), + ) + .0 } - /// Wrapper function to calculate accounts delta hash for `slot` (only used for testing and benchmarking.) 
+ /// Wrapper function to calculate accounts delta hash for `slot` (only used + /// for testing and benchmarking.) /// /// As part of calculating the accounts delta hash, get a list of accounts modified this slot /// (aka dirty pubkeys) and add them to `self.uncleaned_pubkeys` for future cleaning. #[cfg(feature = "dev-context-only-utils")] - pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHash { + pub fn calculate_accounts_delta_hash(&self, slot: Slot) -> AccountsDeltaHashCalculationOutput { self.calculate_accounts_delta_hash_internal(slot, None, HashMap::default()) } + pub fn accumulate_accounts_lt_hash( + &self, + slot: Slot, + ancestors: &Ancestors, + accumulated_accounts_hash: &RwLock>, + written_accounts_before: <HashCacheMap, + written_accounts_after: <HashCacheMap, + ) -> LTHashCacheStat { + let mut old_ancestors = ancestors.clone(); + old_ancestors.remove(&slot); + + let cache_hits = AtomicU64::new(0); + let old_account_loads = AtomicU64::new(0); + let skip_old_account_loads = AtomicU64::new(0); + + let accounts = self.get_pubkey_account_for_slot(slot); + + accounts.par_chunks(250).for_each(|accounts| { + let mut loc_cache_hits = 0_u64; + let mut loc_old_account_loads = 0_u64; + let mut loc_skip_old_account_loads = 0_u64; + let mut loc_accumulated_accounts_hash = AccountLTHash::default(); + let mut loc_written_accounts_after_map: HashMap = + HashMap::default(); + + let r_written_accounts_before = written_accounts_before.read().unwrap(); + accounts.iter().for_each(|(k, account)| { + let mut get_old_hash = || { + if let Some(val) = r_written_accounts_before.get(k) { + // Get old `lt_hash` from cache of writes in parent slot + match val { + LTHashCacheValue::Hash(hash) => { + loc_cache_hits += 1; + return Some(**hash); // TODO on demand calculate, calculate in bg + } + LTHashCacheValue::Account(account) => { + loc_skip_old_account_loads += 1; + return Some(Self::lt_hash_account(account.as_ref(), k)); + } + } + } + loc_old_account_loads 
+= 1; + self.load_with_fixed_root(&old_ancestors, k) + .map(|(account, _)| Self::lt_hash_account(&account, k)) + }; + + if let Some(old) = get_old_hash() { + // todo if old == new, then we can avoid this update altogether + loc_accumulated_accounts_hash.sub(&old); + } + let new = Self::lt_hash_account(account, k); + loc_accumulated_accounts_hash.add(&new); + loc_written_accounts_after_map.insert(*k, LTHashCacheValue::Hash(Box::new(new))); + }); + drop(r_written_accounts_before); + accumulated_accounts_hash + .write() + .unwrap() + .as_mut() + .unwrap() + .add(&loc_accumulated_accounts_hash); + written_accounts_after + .write() + .unwrap() + .extend(loc_written_accounts_after_map); + cache_hits.fetch_add(loc_cache_hits, Ordering::AcqRel); + old_account_loads.fetch_add(loc_old_account_loads, Ordering::AcqRel); + skip_old_account_loads.fetch_add(loc_skip_old_account_loads, Ordering::AcqRel); + }); + + LTHashCacheStat { + hits: cache_hits.load(Ordering::Acquire), + load_old_accounts: old_account_loads.load(Ordering::Acquire), + skip_load_old_accounts: skip_old_account_loads.load(Ordering::Acquire), + before_size: written_accounts_before.read().unwrap().len() as u64, + after_size: written_accounts_after.read().unwrap().len() as u64, + } + } + /// Calculate accounts delta hash for `slot` /// /// As part of calculating the accounts delta hash, get a list of accounts modified this slot @@ -7685,9 +8052,10 @@ impl AccountsDb { slot: Slot, ignore: Option, mut skipped_rewrites: HashMap, - ) -> AccountsDeltaHash { + ) -> AccountsDeltaHashCalculationOutput { let (mut hashes, scan_us, mut accumulate) = self.get_pubkey_hash_for_slot(slot); - let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect(); + let dirty_keys: Vec<_> = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect(); + let dirty_keys_copy = dirty_keys.clone(); hashes.iter().for_each(|(k, _h)| { skipped_rewrites.remove(k); @@ -7725,7 +8093,10 @@ impl AccountsDb { .skipped_rewrites_num 
.fetch_add(num_skipped_rewrites, Ordering::Relaxed); - accounts_delta_hash + AccountsDeltaHashCalculationOutput { + delta_hash: accounts_delta_hash, + accounts: dirty_keys_copy, + } } /// Set the accounts delta hash for `slot` in the `accounts_delta_hashes` map diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 06ce9b49f2cd25..836e7c132c1e77 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -5,12 +5,15 @@ use { ancestors::Ancestors, pubkey_bins::PubkeyBinCalculator24, }, + blake3::OutputReader, bytemuck::{Pod, Zeroable}, + core::slice, log::*, memmap2::MmapMut, rayon::prelude::*, solana_measure::{measure::Measure, measure_us}, solana_sdk::{ + bs58, hash::{Hash, Hasher}, pubkey::Pubkey, rent_collector::RentCollector, @@ -20,8 +23,11 @@ use { std::{ borrow::Borrow, convert::TryInto, + fmt, io::{Seek, SeekFrom, Write}, + mem, path::PathBuf, + str::FromStr, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, @@ -29,6 +35,7 @@ use { thread, time, }, tempfile::tempfile_in, + thiserror::Error, }; pub const MERKLE_FANOUT: usize = 16; @@ -1162,6 +1169,8 @@ impl<'a> AccountsHasher<'a> { overall_sum = Self::checked_cast_for_capitalization( item.lamports as u128 + overall_sum as u128, ); + // note that we DO have to dedup and avoid zero lamport hashes... 
+ // todo: probably we could accumulate here instead of writing every hash here and accumulating each hash later (in a map/fold reduce) hashes.write(&item.hash.0); } else { // if lamports == 0, check if they should be included @@ -1170,6 +1179,7 @@ impl<'a> AccountsHasher<'a> { // the hash of its pubkey let hash = blake3::hash(bytemuck::bytes_of(&item.pubkey)); let hash = Hash::new_from_array(hash.into()); + // todo: same as above hashes.write(&hash); } } @@ -1204,6 +1214,17 @@ impl<'a> AccountsHasher<'a> { let _guard = self.active_stats.activate(ActiveStatItem::HashMerkleTree); let mut hash_time = Measure::start("hash"); + // TODO + let mut _accumulated = Hash::default(); + let mut i = 0; + while i < cumulative.total_count() { + let slice = cumulative.get_slice(i); + slice.iter().for_each(|_hash| { + // todo: accumulate here if we weren't able to do it earlier + // accumulated += hash + }); + i += slice.len(); + } let (hash, _) = Self::compute_merkle_root_from_slices( cumulative.total_count(), MERKLE_FANOUT, @@ -1226,13 +1247,162 @@ pub enum ZeroLamportAccounts { /// Hash of an account #[repr(transparent)] -#[derive(Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable, AbiExample)] +#[derive(Default, Debug, Copy, Clone, Eq, PartialEq, Pod, Zeroable, AbiExample)] pub struct AccountHash(pub Hash); // Ensure the newtype wrapper never changes size from the underlying Hash // This also ensures there are no padding bytes, which is required to safely implement Pod const _: () = assert!(std::mem::size_of::() == std::mem::size_of::()); +/// # Account Lattice Hash Q/A +/// +/// ## What's account Lattice hash? +/// It is a new way to compute accounts hash based on the Algebra struct Lattice +/// defined on hash field. The field elements are 2048 bytes of hash of +/// individual accounts. This 2048 bytes of hash is computed from blake3 hash. +/// There are two operators defined on the field, add/sub, which are u16 x 1024 +/// wrapping add and subtract. 
+/// +/// ## How is the account Lattice hash used? +/// It is used to represent the account state for each slot. Each slot will +/// "add" in the new state of changed accounts and "sub" the old state of the +/// account. This way the accumulated hash of the slot is equal to sum all the +/// accounts state at that slot. The end goal is to replace the current merkle +/// tree hash. +/// +/// ## How to handle account deletion and account creation? +/// That's where the identity element in the lattice play it role. All accounts +/// with zero lamport are treated to have the "identity" hash. add/sub such +/// element don't impact the accumulated hash. +/// +/// ## Are we going to serialize/deserialize Lattice hash into DB storage? +/// This is an open question. Unlike 32 byte Hash, Lattice hash is 2K. Too +/// expensive to store on disk? Therefore, in current implementation, we don't +/// Serialize Lattice hash into disk. Lattice hash is just computed from the +/// account. +/// In future, when we decide to serialize Lattice hash, we can add +/// [Serialize, Deserialize, BorshSerialize, BorshDeserialize, BorshSchema] +/// to the struct. 
+/// +/// Lattice hash +pub const LT_HASH_BYTES: usize = 2048; +pub const LT_HASH_ELEMENT: usize = 1024; +#[repr(transparent)] +#[derive(Copy, Clone, Eq, PartialEq, Pod, Zeroable, AbiExample)] +pub struct AccountLTHash(pub [u16; LT_HASH_ELEMENT]); + +impl Default for AccountLTHash { + fn default() -> Self { + Self([0_u16; LT_HASH_ELEMENT]) + } +} + +impl AccountLTHash { + pub fn new(hash_slice: &[u8]) -> Self { + if hash_slice.len() == LT_HASH_BYTES { + let ptr = hash_slice.as_ptr() as *const u16; + let mut output = [0_u16; LT_HASH_ELEMENT]; + unsafe { + std::ptr::copy_nonoverlapping(ptr, output.as_mut_ptr(), LT_HASH_ELEMENT); + } + Self(output) + } else { + panic!("wrong size for LTHash"); + } + } + + pub const fn new_from_array(hash_array: [u16; LT_HASH_ELEMENT]) -> Self { + Self(hash_array) + } + + pub fn new_from_reader(mut reader: OutputReader) -> Self { + let mut output = [0_u16; LT_HASH_ELEMENT]; + + let ptr = + unsafe { slice::from_raw_parts_mut(output.as_mut_ptr() as *mut u8, LT_HASH_BYTES) }; + reader.fill(ptr); + Self(output) + } + + pub fn to_u16(self) -> [u16; LT_HASH_ELEMENT] { + self.0 + } + + pub fn add(&mut self, other: &AccountLTHash) { + for i in 0..LT_HASH_ELEMENT { + self.0[i] = self.0[i].wrapping_add(other.0[i]); + } + } + + pub fn sub(&mut self, other: &AccountLTHash) { + for i in 0..LT_HASH_ELEMENT { + self.0[i] = self.0[i].wrapping_sub(other.0[i]); + } + } + + pub fn finalize(&self) -> AccountHash { + let mut hasher = blake3::Hasher::new(); + hasher.update(self.as_ref()); + + AccountHash(Hash::new_from_array(hasher.finalize().into())) + } +} + +impl AsRef<[u8]> for AccountLTHash { + fn as_ref(&self) -> &[u8] { + let ptr = unsafe { slice::from_raw_parts(self.0.as_ptr() as *mut u8, LT_HASH_BYTES) }; + ptr + } +} + +impl AsRef<[u16]> for AccountLTHash { + fn as_ref(&self) -> &[u16] { + &self.0[..] 
+ } +} + +impl fmt::Debug for AccountLTHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let u8_slice = AsRef::<[u8]>::as_ref(self); + write!(f, "{}", bs58::encode(u8_slice).into_string()) + } +} + +impl fmt::Display for AccountLTHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let u8_slice = AsRef::<[u8]>::as_ref(self); + write!(f, "{}", bs58::encode(u8_slice).into_string()) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Error)] +pub enum ParseLTHashError { + #[error("string decoded to wrong size for LThash")] + WrongSize, + #[error("failed to decoded string to LThash")] + Invalid, +} + +impl FromStr for AccountLTHash { + type Err = ParseLTHashError; + + fn from_str(s: &str) -> Result { + const MAX_BASE58_LEN: usize = 2798; + + if s.len() > MAX_BASE58_LEN { + return Err(ParseLTHashError::WrongSize); + } + let bytes = bs58::decode(s) + .into_vec() + .map_err(|_| ParseLTHashError::Invalid)?; + if bytes.len() != mem::size_of::() { + Err(ParseLTHashError::WrongSize) + } else { + Ok(AccountLTHash::new(&bytes)) + } + } +} + /// Hash of accounts #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum AccountsHashKind { @@ -1317,7 +1487,10 @@ impl From for SerdeIncrementalAccountsHash { #[cfg(test)] mod tests { - use {super::*, itertools::Itertools, std::str::FromStr, tempfile::tempdir}; + use { + super::*, crate::accounts_db::AccountsDb, itertools::Itertools, + solana_sdk::account::AccountSharedData, std::str::FromStr, tempfile::tempdir, + }; lazy_static! 
{ static ref ACTIVE_STATS: ActiveStats = ActiveStats::default(); @@ -2437,4 +2610,320 @@ mod tests { 2, // accounts above are in 2 groups ); } + + #[test] + fn test_lt_account_hash() { + let h = AccountLTHash::default(); + assert!(h.0.iter().all(|&x| x == 0)); + assert!(h.0.len() == LT_HASH_ELEMENT); + + let owner = Pubkey::new_unique(); + let (key, account) = (Pubkey::new_unique(), AccountSharedData::new(0, 0, &owner)); + let h = AccountsDb::lt_hash_account(&account, &key); + assert!(h.0.iter().all(|&x| x == 0)); + assert!(h.0.len() == LT_HASH_ELEMENT); + } + + /// Test lattice hash computation (ported from FD implementation) + /// https://github.com/firedancer-io/firedancer/blob/lthash/src/ballet/lthash/test_fd_lthash.c + #[test] + fn test_lt_hash() { + let get_lt_hash = |input: &[u8]| -> AccountLTHash { + let mut hasher = blake3::Hasher::new(); + hasher.update(input); + AccountLTHash::new_from_reader(hasher.finalize_xof()) + }; + + // lt hash for "hello" + const LTHASH_HELLO: [u16; 1024] = [ + 0x8fea, 0x3d16, 0x86b3, 0x9282, 0x445e, 0xc591, 0x8de5, 0xb34b, 0x6e50, 0xc1f8, 0xb74e, + 0x868a, 0x08e9, 0x62c5, 0x674a, 0x0f20, 0x92e9, 0x5f40, 0x780d, 0x595b, 0x2e9a, 0x8733, + 0xd3f6, 0x014d, 0xccfa, 0xb2fe, 0xb62f, 0xef97, 0xd53f, 0x4135, 0x1a24, 0x8c33, 0x88c6, + 0x5676, 0xb58a, 0xe5c6, 0xab24, 0xfebc, 0x1e88, 0x4e5b, 0xc91a, 0x6f33, 0x933f, 0x412d, + 0x4822, 0x82c9, 0x3695, 0x9f69, 0xa107, 0xceb1, 0xff35, 0xe0df, 0x5dbe, 0xc000, 0xa883, + 0xd2df, 0x9a9c, 0x0343, 0x37d1, 0xd74c, 0x6a0e, 0xecbc, 0x6b6e, 0x6c79, 0xac92, 0x0905, + 0xc1cf, 0xaa9d, 0x6969, 0x736e, 0xcf4c, 0x0029, 0xcf70, 0x8f05, 0xde0f, 0x3fc9, 0x1db6, + 0x6d09, 0x2e08, 0xf4aa, 0x7208, 0x2cc1, 0x8cfb, 0x276e, 0xd62e, 0x2211, 0xf254, 0x8518, + 0x4d07, 0x1594, 0xf13f, 0xab12, 0xcc65, 0x4d4a, 0xceba, 0xfe93, 0x589f, 0x9f4e, 0xe7ea, + 0x63a8, 0xe612, 0x4ced, 0x58a5, 0x43b3, 0x39f6, 0x457c, 0x474f, 0x9aff, 0x5124, 0x63f6, + 0x450d, 0x3fc2, 0x9ccf, 0xf0c6, 0xc69f, 0x2bd3, 0x7a5d, 0x9574, 0x2f2c, 0xf934, 0xcc03, 
+ 0x9342, 0x9998, 0x0da9, 0x6dd1, 0x460d, 0x3e00, 0xcdde, 0xf14d, 0x06ec, 0x6b74, 0x9551, + 0x68c4, 0x0f94, 0x4ac6, 0xed49, 0xd886, 0x24cb, 0x2a29, 0xf4a4, 0x3a83, 0x1f81, 0xe97a, + 0xfa1e, 0xb1c5, 0xfcd5, 0xb24c, 0xdb92, 0x2b62, 0xa4f1, 0x498e, 0xf00d, 0x63be, 0x7f6e, + 0x2c33, 0xdc3e, 0xb0fb, 0xe854, 0x8ee3, 0x5d95, 0xc613, 0x670b, 0xf4aa, 0x5570, 0x04bc, + 0xf606, 0x664f, 0xe5ec, 0xd65b, 0x0ea1, 0xf37c, 0x7745, 0x809b, 0x031e, 0xed80, 0x7254, + 0x211b, 0x0cce, 0x94e1, 0x6bf6, 0x95b1, 0x49ba, 0x64c0, 0x8ec9, 0x3b27, 0x5f21, 0xafc8, + 0x3b86, 0x2ea5, 0x8c30, 0x168e, 0xc147, 0x1fd5, 0x1637, 0x88f5, 0x9321, 0x63aa, 0xaae5, + 0x33bb, 0xd983, 0xb09a, 0xf24e, 0xa1e5, 0x2b39, 0xd434, 0x7135, 0x61ed, 0x57ad, 0x5940, + 0xe53f, 0x727d, 0x4882, 0x8c44, 0xa61b, 0x1b9f, 0xcee4, 0xf462, 0xc875, 0xc019, 0x9310, + 0x7dc2, 0xf55c, 0xcb36, 0x9505, 0xebb5, 0x8a2b, 0x2b07, 0x0a36, 0x3890, 0x54c8, 0x5a76, + 0xece7, 0x96f1, 0xe3f7, 0x6d99, 0x83e4, 0xff35, 0x1d04, 0x8783, 0xbf2e, 0xb846, 0x79a9, + 0x69ba, 0xb980, 0x28f6, 0x2325, 0x7d13, 0xc44c, 0xacba, 0x134e, 0xa877, 0x6b67, 0x8027, + 0xba94, 0xf564, 0x2174, 0xf985, 0x91c8, 0xd568, 0x319f, 0x6d4e, 0xa59b, 0xd344, 0x4a67, + 0x801d, 0x7aeb, 0x20c0, 0xba23, 0x9744, 0xdd93, 0x4cc5, 0x1148, 0xdf86, 0xad19, 0x06b7, + 0xa824, 0x8e56, 0x2cab, 0x9ad1, 0x5ec0, 0xd57c, 0x0f2b, 0x8d85, 0x65e2, 0xd9c0, 0xc824, + 0x3cae, 0xed26, 0x5c7c, 0x41f9, 0x4767, 0xf730, 0xe210, 0x2926, 0xb68f, 0xcf36, 0x22b9, + 0x5f1b, 0x4ae4, 0xcdcd, 0xe69a, 0x9f4c, 0x1036, 0x8e7c, 0x48de, 0xee0f, 0xbcbd, 0x6bc7, + 0x067a, 0x35e6, 0x98fa, 0x2dcb, 0xa442, 0xbcd0, 0xa02c, 0xc746, 0x60b9, 0x479e, 0x6f56, + 0xff1a, 0xe6f0, 0xef75, 0x5dad, 0x2096, 0xbd07, 0x96e2, 0x2bc6, 0xee33, 0xd122, 0x05f7, + 0x2177, 0x2dbc, 0x729b, 0xfdf0, 0x2c18, 0x800c, 0xdb7d, 0xfb19, 0x0002, 0x3895, 0x5b72, + 0xfbe7, 0x16ce, 0x671f, 0x2175, 0x7c84, 0xc8dc, 0x9690, 0xf594, 0x31b4, 0x47f3, 0xe3f2, + 0x8911, 0x747d, 0x25c2, 0x480a, 0x16ff, 0xba50, 0x8bcb, 0xe9d7, 0xec54, 0x7df4, 0x4b9a, + 0xf4bb, 0x3100, 
0x86cc, 0x62c2, 0x9b73, 0x06d7, 0x157b, 0x0922, 0xab9e, 0x83a6, 0x2f28, + 0x30ce, 0x3eff, 0x5134, 0xc9d5, 0x74ae, 0x295c, 0x9af8, 0x482a, 0x61dc, 0xe555, 0x9c7c, + 0x57de, 0xfe56, 0xd898, 0x19c6, 0x444f, 0x9636, 0x9297, 0xea84, 0xeaba, 0xce24, 0x6dc0, + 0x14c3, 0x6e7d, 0x2a65, 0x3bb5, 0x679d, 0x22a1, 0x8ea1, 0xc564, 0xca61, 0x0b2a, 0x38ea, + 0xe029, 0xcf07, 0x4280, 0xff2a, 0x8697, 0x8d30, 0x185b, 0x919a, 0x8f7c, 0x046c, 0x9390, + 0x50ab, 0xcb51, 0x2334, 0x616f, 0x998f, 0x1d2d, 0xd294, 0x74f1, 0x822c, 0xe50d, 0xdcc6, + 0xbafc, 0x7d92, 0xe202, 0xe28e, 0x2e19, 0xecaa, 0x7cf5, 0x25aa, 0x7a1a, 0x389a, 0xc189, + 0x6af0, 0x6fa3, 0x16c3, 0xa318, 0x8cb5, 0x348e, 0x627b, 0xd144, 0x7d8d, 0xc43c, 0xca5b, + 0xf4bd, 0xb174, 0x4734, 0x3520, 0xbeb9, 0x4f79, 0xa628, 0xe4bd, 0x1bc7, 0xa9f4, 0x3ad2, + 0x959b, 0xe178, 0x1ba2, 0x48bb, 0x5e79, 0xd594, 0xf41e, 0x78ce, 0x685c, 0x79d4, 0xedae, + 0xe11d, 0x2172, 0xb9ab, 0x5ca2, 0xf9ff, 0x2812, 0x66b7, 0xed6d, 0x7eff, 0x960f, 0x4844, + 0x9484, 0x504a, 0x5b29, 0xca8b, 0xdafd, 0xa6b7, 0xef3a, 0xe2e0, 0xa137, 0x1b05, 0x16c2, + 0xefbd, 0x06ac, 0xf3f1, 0xa94f, 0xcade, 0x7087, 0x2ec9, 0x6543, 0x49a1, 0xf4c3, 0x3157, + 0xed65, 0xfc85, 0xefd4, 0x30b8, 0xa5e8, 0x093f, 0xcbe2, 0x8e2b, 0x2fd4, 0xae39, 0x3e37, + 0x37c5, 0xf02f, 0xf643, 0xc03e, 0xe4d0, 0xe305, 0xfd1a, 0x698d, 0x1285, 0x19de, 0x1582, + 0x251f, 0xe136, 0x3eec, 0x862b, 0xbf4d, 0xab67, 0x0c90, 0x3eb5, 0x58d0, 0xc300, 0x7f93, + 0x03e1, 0xf2f9, 0x78fd, 0x93b6, 0x5add, 0x865a, 0x8b20, 0x89e4, 0x7585, 0x6e40, 0x5a8a, + 0x8623, 0x7335, 0xa9e1, 0xfecf, 0x83cb, 0xe9de, 0xf07c, 0x36ca, 0x5a7b, 0x9fff, 0xe419, + 0x8e48, 0xa704, 0xbcab, 0x44ae, 0x6dfa, 0x810c, 0x94f4, 0x62fb, 0xa34e, 0xa9a5, 0x1d13, + 0x98a9, 0x88ba, 0x7bc2, 0x7a59, 0x188a, 0x1855, 0xd27d, 0x6781, 0xcf08, 0xde49, 0x5588, + 0x5c8b, 0x1f4a, 0xd22b, 0x3959, 0xe754, 0xf071, 0xdfc2, 0xf352, 0x255c, 0x2d36, 0x59d0, + 0x4621, 0x1ed0, 0xa0b5, 0x457d, 0xd3d7, 0xd137, 0x10ca, 0xeeb1, 0xec30, 0x96af, 0x9be5, + 0x2181, 0xe570, 0x8a33, 0x137e, 
0x861e, 0xd155, 0x950d, 0xc6e4, 0x5c1f, 0xe4dc, 0x4466, + 0x7078, 0x75a5, 0x7a51, 0x1339, 0xa1a8, 0xcb89, 0xf383, 0xabf0, 0x0170, 0xbb1d, 0xea76, + 0xe491, 0xf911, 0xdc42, 0xec04, 0x82b8, 0xeadd, 0xc890, 0x505c, 0xafa7, 0x42cb, 0xfd99, + 0x127e, 0x0724, 0xd4f9, 0x94ef, 0xf060, 0x67fe, 0x038d, 0x2876, 0xb812, 0xbf05, 0xe904, + 0x003e, 0x2ee4, 0xe8f5, 0x0a66, 0xd790, 0x3ccc, 0x28be, 0xdbc2, 0x073c, 0xd4a5, 0x904c, + 0x60ad, 0x4f67, 0x77ac, 0xae49, 0x2d6c, 0x9220, 0xde9c, 0x2a2b, 0xf99c, 0xb54f, 0x8290, + 0x2e7d, 0x0ca1, 0xf79b, 0xc6ff, 0x3e6e, 0x8eb4, 0x66b1, 0xc6e6, 0x600f, 0xda08, 0xa933, + 0x2cad, 0x308a, 0x93f2, 0x4f70, 0x72d3, 0x56e0, 0x4ddd, 0x682c, 0x589f, 0xd461, 0x06ad, + 0x4e9a, 0x1af7, 0x901c, 0xa1d4, 0xb990, 0xbbcc, 0xdcbb, 0xe46f, 0xe585, 0x9800, 0x86e6, + 0xa735, 0xac0f, 0xb666, 0xaeac, 0x6e00, 0x8b36, 0xc4ce, 0x7261, 0xf078, 0xb42a, 0x86fb, + 0xd4d8, 0x1402, 0xd7ac, 0x69c6, 0x8b29, 0x66ce, 0x512d, 0x93f8, 0x811b, 0x7b2c, 0x1a3b, + 0x88fb, 0x8ca2, 0x197e, 0xbd7b, 0x5c5c, 0xf2c3, 0x803b, 0xe9f2, 0x6fd2, 0x8c05, 0x6966, + 0x2249, 0xceab, 0xe42b, 0x8195, 0x9ddc, 0x79ee, 0x1e35, 0x3fd4, 0x6fc4, 0x9b26, 0x85b0, + 0x45a4, 0x5a6b, 0xf43b, 0x0f07, 0x3104, 0x463d, 0x710a, 0x288e, 0x0dcd, 0x8f1a, 0xa307, + 0x6790, 0x1f2e, 0x991a, 0x7fcc, 0x241a, 0x80d9, 0x9f22, 0xac19, 0x0015, 0x5690, 0x45ba, + 0x4a3f, 0x84f1, 0x01c5, 0xc2b8, 0xa512, 0xffc0, 0xebbd, 0x3c5f, 0x66dc, 0x9fdd, 0xe066, + 0x5b39, 0x2fa1, 0x9432, 0xad65, 0xf397, 0x528a, 0x0c94, 0xe646, 0xbeb5, 0xe91c, 0x7d24, + 0x305c, 0x2c7b, 0x3f93, 0x860e, 0x6e39, 0x953a, 0xb010, 0xbb1b, 0x15a2, 0x369b, 0xf840, + 0xa258, 0xb39a, 0x522b, 0xedbb, 0x7fb9, 0xb94c, 0x45d0, 0x34c0, 0xd516, 0xb52d, 0xdce1, + 0x35e4, 0x3801, 0x3e5c, 0x6826, 0x3b4e, 0xc688, 0xe612, 0x64a8, 0x7898, 0xd07f, 0xa93e, + 0x0f42, 0x9392, 0xa877, 0xd68f, 0xd947, 0x7615, 0xac5e, 0x6f1c, 0x3a42, 0x04c8, 0x993e, + 0x53e5, 0x272e, 0x3021, 0xa3d2, 0xfc24, 0xbd1e, 0xf109, 0x3b8f, 0x6566, 0x48f9, 0x4ef5, + 0x777d, 0xcbaa, 0x029e, 0x8867, 0xda07, 0xa941, 
0xeb45, 0x8ad2, 0x9c78, 0xa7c9, 0xdf67, + 0x2ec0, 0x8c0b, 0x6827, 0x18ca, 0x78c2, 0xc9df, 0x8a0e, 0x2aae, 0x4e31, 0xa7ec, 0xd0e5, + 0x748c, 0x1556, 0x44ad, 0xec45, 0x9e48, 0x13d1, 0x74ae, 0x1382, 0x6fdd, 0x6d15, 0x39b9, + 0x4a8a, 0xe31d, 0x4732, 0xb215, 0x5b5e, 0x5b7a, 0x5981, 0x4e94, 0x2ccd, 0x12b6, 0x5072, + 0x4e2b, 0x078f, 0x6896, 0xec47, 0x1165, 0x2625, 0x7fd3, 0xe652, 0xb05f, 0x6fc8, 0xfcb0, + 0xf199, 0xef36, 0x89db, 0xb274, 0x3e7c, 0x9985, 0xbc7a, 0xbd5e, 0x9f19, 0x6068, 0x47f2, + 0xc8db, 0x8025, 0x3e28, 0xf0b2, 0xbad1, 0x1237, 0x3b1d, 0xe2fc, 0x24b7, 0xb8b8, 0x4d82, + 0x5adc, 0x16b4, 0x1bb7, 0xedec, 0x9f94, 0x3557, 0x4ce4, 0x9995, 0xec62, 0xce8e, 0x597e, + 0x0161, 0x12f7, 0xa4d3, 0x98c7, 0xaede, 0x7e2d, 0xaa32, 0x98e4, 0xbfd7, 0x7e5a, 0x9507, + 0x8900, 0x1f5a, 0x46f5, 0x64cf, 0x6885, 0x6977, 0x26c4, 0xd94a, 0xe454, 0xcd75, 0xeda1, + 0x476b, 0x697c, 0xe522, 0x4ab9, 0x9e88, 0xde52, 0x67e4, 0xb170, 0x3270, 0x6291, 0x2422, + 0x95bb, 0xcf27, 0x90da, 0x12b2, 0x1305, 0x029b, 0x8427, 0x52e5, 0x3e64, 0x7a88, 0xd34d, + 0x68ee, 0x6099, 0xae6d, 0x622f, 0x1237, 0x33bd, 0x0143, 0x1e1c, 0xd463, 0xda74, 0x7272, + 0xa794, 0x1714, 0x8ec6, 0xf919, 0xdb4c, 0x60d7, 0xa3ae, 0xe336, 0x12bf, 0xc469, 0xfc67, + 0x9037, 0xcb6a, 0x5ebd, 0x85b5, 0x6c11, 0xa54e, 0x7e7f, 0xec0d, 0x46e5, 0x43ec, 0x6bf5, + 0x086f, 0x9421, 0xf5f7, 0xdbdf, 0x9994, 0x072c, 0xe5d9, 0x19a5, 0x8458, 0xec68, 0xba3f, + 0x9924, + ]; + let lt_hash_hello = get_lt_hash("hello".as_bytes()); + assert_eq!(AsRef::<[u16]>::as_ref(&lt_hash_hello), &LTHASH_HELLO); + + // lt hash for "world!"
+ const LTHASH_WORLD: [u16; 1024] = [ + 0x56dc, 0x1d98, 0x5420, 0x810d, 0x936f, 0x1011, 0xa2ff, 0x6681, 0x637e, 0x9f2c, 0x0024, + 0xebd4, 0xe5f2, 0x3382, 0xd48b, 0x209e, 0xb031, 0xe7a5, 0x026f, 0x55f1, 0xc0cf, 0xe566, + 0x9eb0, 0x0a41, 0x3eb1, 0x3d36, 0x1b7c, 0x83ca, 0x9aa6, 0x2264, 0x8794, 0xfb85, 0x71e0, + 0x64c9, 0x227c, 0xed27, 0x09e0, 0xe5d5, 0xc8da, 0x88a5, 0x8b49, 0xf5a5, 0x3137, 0xbeed, + 0xca0e, 0x7690, 0x0570, 0xa5de, 0x4e0b, 0x4827, 0x4ae4, 0x2dad, 0x0ce4, 0xd56f, 0x9819, + 0x5d4e, 0xe93a, 0x0024, 0xb7b2, 0xc7ba, 0xa00c, 0x6709, 0x1d26, 0x53d3, 0x17b1, 0xebdf, + 0xb18f, 0xb30a, 0x3d6b, 0x1d75, 0x26a0, 0x260e, 0x6585, 0x2ba6, 0xc88d, 0x70ef, 0xf6f4, + 0x8b7f, 0xc03b, 0x285b, 0x997b, 0x933e, 0xf139, 0xe097, 0x3eff, 0xd9f7, 0x605a, 0xaeec, + 0xee8d, 0x1527, 0x3bff, 0x7081, 0xda28, 0x4c0f, 0x44b0, 0xb7d0, 0x8f9b, 0xa657, 0x8e47, + 0xa405, 0x5507, 0xe5f9, 0x52ed, 0xc4e1, 0x300c, 0x0db3, 0xbf93, 0xfddd, 0x8f21, 0x10c5, + 0x4bfd, 0x5f13, 0xe136, 0xd72f, 0x1822, 0xb424, 0x996f, 0x8fdd, 0x0703, 0xa57f, 0x7923, + 0x0755, 0x7aee, 0x168d, 0x1525, 0xf912, 0xb48d, 0xfb9e, 0xd606, 0xb2ce, 0x98ef, 0x20fb, + 0xd21a, 0x8261, 0xd6db, 0x61bf, 0xdbc6, 0x02b1, 0x45e9, 0x1ffa, 0x071f, 0xa2c0, 0x74a8, + 0xae54, 0x59e1, 0xe2dc, 0x0ec9, 0x35ac, 0xbbb0, 0x5938, 0x2210, 0xcf9e, 0x2d9f, 0x7e01, + 0x2ab7, 0xd7d8, 0x8e36, 0x6b09, 0x262c, 0xb017, 0x9b6e, 0x1455, 0x7401, 0x8a8a, 0x6491, + 0x9de9, 0x7856, 0x8fb3, 0x8fcb, 0x3c05, 0x3e74, 0x40a4, 0x682a, 0x1a67, 0x9888, 0xb949, + 0xbb75, 0x6ef9, 0xc457, 0xa83a, 0x7965, 0x159e, 0xa415, 0x1c6b, 0x1b94, 0xaa10, 0x137d, + 0xbc3a, 0xc6bd, 0xf303, 0x7758, 0xc8da, 0xf5a3, 0x5826, 0x2b48, 0x9852, 0x3033, 0xfa85, + 0x3f85, 0x9b38, 0xd409, 0x4813, 0x36b2, 0x43d7, 0xdc0a, 0xfb54, 0x22b2, 0xf1e1, 0xfe5a, + 0x44ff, 0x217c, 0x158d, 0x2041, 0x7d2a, 0x4a78, 0xfc39, 0xb7db, 0x4786, 0xf8ee, 0xc353, + 0x96c2, 0x7be2, 0xd18d, 0x0407, 0x7b0e, 0x04f5, 0x3c63, 0x415e, 0xb1d1, 0x31cc, 0x25ac, + 0x9d8a, 0x4845, 0xd2b4, 0x0cdd, 0xf9a4, 0xae8f, 0x7fe5, 0x2285, 
0xa749, 0x43cb, 0x16ae, + 0x09a9, 0xbd32, 0x923c, 0x2825, 0xbe21, 0xfa66, 0x2638, 0x3435, 0x6d79, 0xdf4b, 0xaab4, + 0xf2b1, 0x08f4, 0x64fd, 0x7364, 0x14e4, 0x1457, 0xbce3, 0xe114, 0xeccb, 0x2490, 0xae79, + 0x7448, 0x6310, 0xeff6, 0x2bb1, 0x79e7, 0xf5ae, 0xab40, 0xff6d, 0x889b, 0xe5f5, 0x69ee, + 0x3298, 0x512a, 0x2573, 0xf85c, 0xc69a, 0xb142, 0x3ed0, 0x7b9d, 0xc7a5, 0xea5d, 0xd085, + 0x4e99, 0xaf95, 0x404b, 0x8aca, 0x870f, 0x098a, 0x7c9c, 0x30cf, 0x3e16, 0x9010, 0xa94b, + 0x3cca, 0x00bc, 0xddb8, 0xbf1b, 0xc61a, 0x7121, 0xd668, 0xf4ba, 0xb339, 0xa66c, 0xd5b9, + 0x557c, 0x70a0, 0x34e4, 0x43a5, 0x9c32, 0x2e94, 0xa47f, 0x0b21, 0xb594, 0xb483, 0xf823, + 0x8c56, 0x9ee9, 0x71aa, 0xf97c, 0x1c62, 0xe003, 0xcbbe, 0xca8f, 0x58e5, 0xcbee, 0x758e, + 0x5511, 0x38da, 0x7816, 0xd6a1, 0x4550, 0x09e9, 0x682f, 0xf2ca, 0x5ea1, 0x58c2, 0x78ed, + 0xb630, 0xee80, 0xa2df, 0xa890, 0x8b42, 0x83d0, 0x7ec6, 0xa87e, 0x896c, 0xf649, 0x173d, + 0x4950, 0x5d0a, 0xd1a8, 0x7376, 0x4a4a, 0xe53f, 0x447d, 0x6efd, 0xd202, 0x1da3, 0x4825, + 0xd44b, 0x4343, 0xa1a9, 0x8aac, 0x5b50, 0xc8e6, 0x8086, 0xd64f, 0xd077, 0x76f0, 0x9443, + 0xcd70, 0x950d, 0x0369, 0xf1be, 0xb771, 0x5222, 0x4b40, 0x4846, 0x3fab, 0x1d5d, 0xc69d, + 0xa200, 0xe217, 0xb8bd, 0x2ef7, 0xed6b, 0xa78c, 0xe978, 0x0e16, 0x72bf, 0x05a3, 0xdcb4, + 0x4024, 0xfca2, 0x0219, 0x0d3e, 0xa83f, 0x6127, 0x33ab, 0x3ae5, 0xe7a1, 0x2e76, 0xf6f5, + 0xbee1, 0xa712, 0xab89, 0xf058, 0x71ed, 0xd39e, 0xa383, 0x5f64, 0xe2b6, 0xbe86, 0xee47, + 0x5bd8, 0x1536, 0xc6ed, 0x1c40, 0x836d, 0xcc40, 0x18ff, 0xe30a, 0xae2c, 0xc709, 0x7b40, + 0xddf8, 0x7b72, 0x97da, 0x3f71, 0x6dba, 0x578b, 0x980a, 0x2e0e, 0xd0c0, 0x871f, 0xde9b, + 0xa821, 0x1a41, 0xbff0, 0x04cb, 0x40d6, 0x9942, 0xf717, 0x2c1a, 0x65f9, 0xae3d, 0x9e4e, + 0x3ca6, 0x2d53, 0x3f6e, 0xc886, 0x5bbc, 0x9936, 0x09de, 0xb4ab, 0xc044, 0xa7a0, 0x8c37, + 0x383a, 0x3ab9, 0xcd16, 0x33c2, 0x908e, 0x75c3, 0x51da, 0xcb86, 0x4640, 0xe2b7, 0xbc2f, + 0x1bbb, 0xc1c0, 0xc4ce, 0x821d, 0x0a46, 0x178c, 0x1291, 0xfe6e, 0xd15f, 0x8d3e, 
0x9d01, + 0x79b2, 0xfe4c, 0x75eb, 0x176c, 0x6be7, 0x6efa, 0xdcc6, 0x2127, 0xef2b, 0xb83a, 0xe10b, + 0x3206, 0xc2fe, 0x1a3d, 0x62c8, 0xf55e, 0xc594, 0x81ba, 0x0188, 0x962a, 0x0f1c, 0x2489, + 0xb3ca, 0x0d9a, 0xca06, 0xfe37, 0x2cb0, 0x87a1, 0xd33b, 0x31b0, 0x1efe, 0x08f2, 0xc55a, + 0xcb8a, 0x1633, 0x9df2, 0xc468, 0xd5e3, 0x3117, 0x3333, 0x488f, 0x4a9d, 0xc68f, 0x73f9, + 0xa82d, 0xe1af, 0xeb4e, 0xe41b, 0x33f5, 0x051f, 0x7592, 0x0528, 0x7aee, 0xc3eb, 0x7010, + 0x03f4, 0xaba4, 0x3e8f, 0x4abd, 0x2b41, 0x5390, 0x21a1, 0x6dc6, 0xd828, 0xa9b4, 0xc63a, + 0x3ab3, 0x14aa, 0xdc3a, 0x513f, 0x9886, 0x0000, 0x1169, 0xbba0, 0xb2fe, 0x4b09, 0x0198, + 0xcfff, 0xb898, 0x8cfe, 0x3def, 0x0b4b, 0xc154, 0x2491, 0x28d7, 0x757f, 0x06c5, 0x98c5, + 0x2dfa, 0xc068, 0xc74d, 0x521e, 0x70d5, 0xde35, 0x7718, 0xddf8, 0xa387, 0x807d, 0x0056, + 0x697b, 0x3043, 0x4ec8, 0xc2be, 0xa867, 0x0555, 0x2d3f, 0xc9f1, 0xfe7c, 0xe851, 0x5b85, + 0x2175, 0x741d, 0x1e5b, 0xafd3, 0xf757, 0x1bd9, 0x96df, 0x03df, 0x28d6, 0xbb77, 0xd5b5, + 0x03d3, 0xc078, 0x255b, 0xee39, 0x9705, 0x7fcc, 0xf16e, 0x16ca, 0x71d1, 0x9107, 0x00a5, + 0x103d, 0x0b12, 0xea24, 0xdf09, 0x7745, 0x7c1b, 0xcdba, 0x3093, 0x742e, 0x1e4c, 0x087b, + 0x9661, 0x0f3a, 0x6c51, 0xdc63, 0xb9d8, 0xf518, 0x09e1, 0x1426, 0xb6dc, 0xc246, 0xa273, + 0x5562, 0x8fde, 0x8f0e, 0xd034, 0x6651, 0x95ec, 0x6452, 0x95d4, 0xdf84, 0x118c, 0x44ab, + 0x328b, 0xf3d1, 0xb048, 0x2081, 0x748a, 0x05ee, 0x0f9b, 0x8110, 0x46e8, 0x6476, 0x8863, + 0x9850, 0xcb94, 0x2d2e, 0xcbac, 0xce53, 0x91bb, 0xa605, 0xfe50, 0x06f5, 0xef2d, 0xbd7c, + 0x736b, 0xf371, 0x6055, 0x6ab9, 0x135f, 0xb572, 0x5eb1, 0x7a36, 0xe4d5, 0xb998, 0xa7ea, + 0x1d06, 0x1275, 0x7f89, 0x3c92, 0xe906, 0x40c1, 0x8207, 0x058e, 0xa660, 0x72cd, 0xce25, + 0xd92a, 0x7731, 0x7633, 0xc6da, 0xb213, 0x0a93, 0x30c0, 0x58d3, 0x5ac0, 0x3ce7, 0x1028, + 0x4bcd, 0x86b9, 0x7f60, 0x22a6, 0x0ce9, 0xb569, 0x8c83, 0xb5bf, 0x2dd9, 0x7bdd, 0xc4bc, + 0xce57, 0x0b0b, 0x0a9c, 0xd74a, 0x6936, 0x0e40, 0xa874, 0x02b2, 0xfe8d, 0x0c16, 0xa0e0, + 0x5b01, 
0x6f18, 0x6264, 0x4e77, 0x01a0, 0x3484, 0xe5b4, 0xf0cc, 0xd30d, 0x7904, 0x8216, + 0x46dd, 0x6fc0, 0xfa77, 0x8c3e, 0x5c10, 0xf776, 0x3043, 0x23dc, 0xfffc, 0x35c0, 0x8007, + 0x7993, 0xf198, 0x94eb, 0xe9bf, 0x7cc0, 0x170d, 0xea0d, 0xa7d0, 0x3d77, 0x7d6e, 0xc8f7, + 0x9a86, 0x6462, 0xc8d2, 0x357a, 0x8fa0, 0xf201, 0x55e5, 0x5235, 0x7da1, 0x52e6, 0xcc31, + 0xbecd, 0x3343, 0x343a, 0x2b1f, 0xd19e, 0x4cc6, 0x83a2, 0x6d16, 0x9c97, 0xa61b, 0xde54, + 0x6da1, 0xa57e, 0x44a7, 0x1e84, 0x98e7, 0x0e44, 0x5494, 0xe013, 0x0ed2, 0x0b3a, 0xa2db, + 0xc93a, 0xe6a0, 0xdccd, 0x84ac, 0xc898, 0xb974, 0x3d62, 0xe4cf, 0xcbc3, 0xa7bd, 0xde59, + 0x9391, 0x5635, 0xdac1, 0xd9b6, 0x1700, 0x7b35, 0x9555, 0x648e, 0xdacd, 0xffdf, 0xdd6a, + 0x9616, 0xea2e, 0xb1a4, 0x80c1, 0xdb21, 0x1076, 0x9543, 0xc165, 0x66d8, 0x26b8, 0x7095, + 0xdf4f, 0xcf4b, 0x1cec, 0xb231, 0x4037, 0x9fa5, 0x3637, 0xf96e, 0x215a, 0x65c9, 0x4696, + 0x734a, 0x556e, 0xb47f, 0x5160, 0xbf85, 0x850b, 0x06e0, 0x8181, 0x45f7, 0x202b, 0x86d1, + 0x5de7, 0x8ecd, 0xf77c, 0x031f, 0xa330, 0x79b4, 0xf38b, 0x59a8, 0x68cf, 0xf885, 0xfc87, + 0x4054, 0xe627, 0x845e, 0xa77f, 0x8450, 0x2302, 0x86e6, 0x2d94, 0xbbf7, 0x9e54, 0x2d79, + 0x1aa6, 0x6c50, 0xaef5, 0xbd9d, 0x85f3, 0x7b05, 0x5ec3, 0x6d70, 0x3ff3, 0x62a6, 0x252a, + 0x72c4, 0x2f56, 0xf9c1, 0xadf9, 0x00ff, 0xedfc, 0xddf3, 0x439c, 0x2777, 0xb742, 0xddfd, + 0x14fc, 0xa147, 0xd950, 0x37bd, 0x6296, 0xf816, 0x29af, 0x297c, 0xbf24, 0x6f05, 0xe8a4, + 0x17f4, 0xc8ab, 0xc0d1, 0x87b2, 0xeca2, 0x1b31, 0xa20b, 0xaad8, 0xd46c, 0x636f, 0x3975, + 0x363e, 0xdc79, 0xc450, 0x507e, 0xd8d5, 0x74c9, 0x56de, 0x92bc, 0x05eb, 0x749a, 0x3d98, + 0xf26a, 0x23fe, 0x4f29, 0x7856, 0x968c, 0x8794, 0x2835, 0x8dc3, 0xa440, 0x3b7b, 0xcc28, + 0x98e6, 0x36f1, 0xf305, 0x7641, 0xe895, 0x88d7, 0xedb3, 0x934a, 0x88c2, 0x0d19, 0xd558, + 0xe4bd, 0xe365, 0x5b52, 0xd26d, 0x77be, 0xe2cc, 0xd759, 0xb890, 0x5924, 0xf681, 0xfd5f, + 0xccf7, 0xc9b7, 0x544a, 0x1fe8, 0xacd1, 0x349e, 0xf889, 0x3e38, 0x980a, 0xfcf6, 0x4aaf, + 0xc970, 0x2699, 0xce48, 
0x3229, 0x148e, 0x2c20, 0x28c1, 0x7fc3, 0x1cf6, 0x080c, 0x2f85, + 0x6ed0, 0xa884, 0xd958, 0xd555, 0x480d, 0x8874, 0xe8d4, 0x7c66, 0x226f, 0xbf4f, 0xbcea, + 0x3eeb, 0xac04, 0xc774, 0xbc95, 0xa97f, 0x8382, 0x165b, 0xc178, 0x708e, 0x8be5, 0x7eb4, + 0x84ad, 0x15d5, 0x5193, 0x4114, 0xd320, 0x9add, 0x85a3, 0x8b70, 0x1be3, 0xa39d, 0xbf82, + 0x6e04, 0x3bd2, 0xdf31, 0x0741, 0xaab8, 0xd398, 0x01f4, 0xdd3a, 0x2f9d, 0x2b55, 0x6811, + 0x171f, + ]; + let lt_hash_world = get_lt_hash("world!".as_bytes()); + assert_eq!(AsRef::<[u16]>::as_ref(&lt_hash_world), &LTHASH_WORLD); + + // add "hello" and "world!" + let mut expected_sum = [0_u16; LT_HASH_ELEMENT]; + for i in 0..LT_HASH_ELEMENT { + expected_sum[i] = LTHASH_HELLO[i].wrapping_add(LTHASH_WORLD[i]); + } + let mut lt_hash_sum = AccountLTHash::default(); + lt_hash_sum.add(&lt_hash_hello); + lt_hash_sum.add(&lt_hash_world); + assert_eq!(lt_hash_sum.to_u16(), expected_sum); + + // sub "hello" + let mut lt_hash = lt_hash_sum; + lt_hash.sub(&lt_hash_hello); + assert_eq!(lt_hash.to_u16(), LTHASH_WORLD); + + // sub "world" + let mut lt_hash = lt_hash_sum; + lt_hash.sub(&lt_hash_world); + + assert_eq!(lt_hash.to_u16(), LTHASH_HELLO); + } + + #[test] + fn test_lt_hash_finalize() { + let get_lt_hash = |input: &[u8]| -> AccountLTHash { + let mut hasher = blake3::Hasher::new(); + hasher.update(input); + AccountLTHash::new_from_reader(hasher.finalize_xof()) + }; + + let lt_hash_hello = get_lt_hash("hello".as_bytes()); + let final_hash = lt_hash_hello.finalize(); + let expected = Hash::from_str("6MmGXJNfa8JKF5XhtHuKEhnLmr38vcyqBCwEXCCFnRVN").unwrap(); + assert_eq!(final_hash.0, expected); + } + + #[test] + fn test_lt_hash_bs58_str() { + let get_lt_hash = |input: &[u8]| -> AccountLTHash { + let mut hasher = blake3::Hasher::new(); + hasher.update(input); + AccountLTHash::new_from_reader(hasher.finalize_xof()) + }; + + let lt_hash_hello = get_lt_hash("hello".as_bytes()); + let hash_base58_str =
"Y6vmUpYzw5f3AF2b3pAhqqz7qanMsqmB55WeaajtPi5wsMtRRku8RCQSjvVo9kUk2H97ZksbcayVzqZHZ56jkgER2EkSyArAQTVzfYDmpc7DU2VcYEcRWMTbHMBB8mTZDggjKMMHEqsXKEqVyUbW3w8nzHSS2MdasjcA6nsnQHG4ofNXpjjDSXUfXBvysqsRshcCMsvKsFjrNwwDuMa7sVQpXXe9YpG4Sej59FHJS5ExVmZfk36hawMbU3CSnM87C3RiWXuu9cNHKdndw561sbHkWwb2bzgaKCfh47hiQXDGb7yg22vpgsEb4F6K44xT1jpQnhbGGtLbV2B72ca4tsJgszvmeucQWgGuRWvh84tpn7EuPmWJt2CPQT4kbpJSYQW2jihL19Ao5tW7HsHhsaKHypxq9J9tFa179g864aRjvrvitoyDZWJHsRzMuD4ZGrZEATaWsSWcbfLh6QzRQie4YHyTfpUbm7eJqu8ohqwMnA7V2crAw1RAw3dAz8BfHA8TLNzkcq6B4q1KpcSAnFCTgEHSuymwuggLGMuj6u2VboynNvj36A22C2EqXuV3CTT8RXeEufsUWJERQH7TcUU7h9rWJDzjrf2ZmtD7d4jtfBF1yM4Qkd21SWHPa8893EQDGuk65r3wU4minie7iLozy3v7kk6kxozr2Ym5cWVng5g28rjkaJtEiihwKkJUrJZwxaVZZiRqBih4dt2WzZ8cpU5y6zvvXgHLo9YG6sRPKTx3txMNJTbtvD3tNQHBtMamHrJqYFy1w9u5bTYWP21usxRMyRCDfhRmtYuynDNeCk5NDTfo92nZK2W48UZGgLf1kUNJ7hMg5MSArAgLLDmp4S3HDGFCTdZKXtLvvg1ytR5V339tdrKzYp85xYRs9C6xd3LK3veB8qWHnSyK7z1fYBgCtos8vTUscs2Z4aHnZ36eof6wEeQE8DDekuZL5Lea9g32t4XHMBCkFxW2hhQqJ5pv3KpAGWTUmgHULeN4PDZ4hLP5MQujKRRvz5CUsXUe2bdcYVCWuHpqJRyAMw4z5QXJpX5yW5kY9DxzsdCkAHSfHY3T6YVxT7PaJJJXChG8ybUHNXPW8Zu4ZPG77bR84FaKUD6LVMumrnbmqd8TKxca8LAJpBDK2YeBABYja5e6QKqJsehFk53XopopSScqVdSaZUi3WYSyiY2vPq23zCmYLq3iWjBvLt8JG321LJBfadSSspeKiT8jreqj5iQMyBYnXrS5JTW3FuEobKPmMyjJ9WpSML4HD65gc7BBCH4eefUKeNrVR6MWdohfVr8fHXbzxYUbWYkr1axVYdwuBx674jinrofMBAVwkDLegLSMbkpJ3U5pUQRTYfe6fFH7iPguBxQGmHa7dP4FHmH3x6Y3Mx3Ua3undWi1jf2f4geyn4rD7pTh97XwYU3uMoDZ329CeCdhFVphJLc6Jqx2v31pFBPqKmASi4iAZGPayK2mG4J5HFHrkRozjBbQGyGqpxpR9gfgQ1EZvao4zWjhoj4wXreziCvpqhDnwbKeQ7F1fLHbM3WsySXYs7PAiNFQAgD2oDTgpWZps9CobvFzs1GMjEYUn9cBXsmsx4UjfUT3vwx4xV38wfcq8mpz98nciwA1hnNAzBCd2CtueDQvooojKq5kbBmY1p9gCxju1wuiae58qAfg9x9NqJtViSyqdF3VKYSCkMVoFRJTKFuYNvUxcEWG1pLy8ofiiWoN2obPYaKxrab8jDNtWyWVKizh6skZLZGFsTrpecTkc5ULC2kbYmf3UgyALZAKoeLoFkiQQ4E9YMmGdsYrGQBnPnrGYFjjgajCLurnYgEYQuxofFutPCk3XzWJXdjFTQMoA9LtFc47wKiJtH3uZernWMoAjy1BgM7jFbiLN1muJRnLsnUa6vKwLfk8hbYn8SfN63N2CkqdhEeJ8dEKLvu741HfkX2gi8rWWR5oKWQA9NQrPK4zBttBJZHuWCsh1M9FAXj5HJCdCyZFJUd
CiP2ebcMY6rrAkVbH8tyWMFCKnguaeeYF9nD9DPbjLgsYURnAhWprwNCJKe5sKrPBqD4FZVxwS8F3sctCBchMrpocj6Hhe1UtV4ke9q92pb6HyFZ8LRFQDZjuSEqGzaSP5w9cuuvckz4xDRzqFhrrG5ZfJovBDrz9hBiPkVFaZs7NfrdRaZGkurerqwk1gBT87JDk6UY6ZRAyEZQDdfVvPBqsbHSxB2BQZTWPvJqvqZE8K9dobd5A5WU6Rcxp9tBwANL7ouCNTFs9JzbAgghDMZUMHvqBNmQArfYBidLku1fjepUmVfV4YzfaAHFuKrk5NbcfACYHG8RHoAqBz24d5oUJacP354eHNWu1hYWbH73metXHqadrgk3YtQHKaK3bmZnxd4AVPVy916GfnfQoctKhXbTaJmz1kQVebCpX8xpCEdyrXStD4XzrxtrVzh3EsgRfonReZEmSRkRa7B3Wv7e8ZX2JcMYQbVddyiiVRD7apcNbFo9qfQpHd15wF51AE1P7D2HtoNtJUa5f3a3UiN6rXLE83ek57zDiSyDEwHsCdgWTeL92zvxxFiD1vewLdtfoZcmDAvxuVcFyeEEYBLQRPLoKxRoxvjKjqzrEvi9WXH89k986ufxZTSefgcYNz8udNeoejZEgeq9dp3VNF8stsKK5bP9wKg9vEiXk536Mqo622e1gJozYunB4C9fz4ZczS6gDemjpohPvs5nX39zgBN89RJ6apvJN9JgvMnxCpR93LCEVFMUXoBNwGRpJcPf4XZd3Rz5kedCZem5WwPH2cYZGC"; + let hash_from_str = AccountLTHash::from_str(hash_base58_str).unwrap(); + assert_eq!(lt_hash_hello, hash_from_str); + + let mut hash_base58_string = hash_base58_str.to_string(); + hash_base58_string.push_str(hash_base58_str); + assert_eq!( + AccountLTHash::from_str(&hash_base58_string), + Err(ParseLTHashError::WrongSize) + ); + + hash_base58_string.truncate(hash_base58_string.len() / 2); + assert!(AccountLTHash::from_str(&hash_base58_string).is_ok()); + + hash_base58_string.truncate(hash_base58_str.len() / 2); + assert_eq!( + AccountLTHash::from_str(&hash_base58_string), + Err(ParseLTHashError::WrongSize) + ); + + let mut hash_base58_string = hash_base58_str.to_string(); + hash_base58_string.replace_range(..1, "O"); + assert_eq!( + AccountLTHash::from_str(&hash_base58_string), + Err(ParseLTHashError::Invalid) + ); + } + + #[test] + #[should_panic] + fn test_lt_hash_new_size_too_big() { + let slice = [1; LT_HASH_BYTES + 1]; + let _h = AccountLTHash::new(&slice); + } + + #[test] + #[should_panic] + fn test_lt_hash_new_size_too_small() { + let slice = [1; LT_HASH_BYTES - 1]; + let _h = AccountLTHash::new(&slice); + } + + #[test] + fn test_lt_hash_new() { + let slice = [1;
LT_HASH_BYTES]; + let _h = AccountLTHash::new(&slice); + } } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 1f0c06966deffc..dcee84de726759 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -139,6 +139,8 @@ pub fn get_accounts_db_config( test_partitioned_epoch_rewards, test_skip_rewrites_but_include_in_bank_hash: arg_matches .is_present("accounts_db_test_skip_rewrites"), + enable_accumulate_account_hash_calculation: arg_matches + .is_present("enable_accumulate_account_hash_calculation"), ..AccountsDbConfig::default() } } diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 4509e975cf10a1..01fce43860e6d8 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -603,6 +603,13 @@ fn main() { bank delta hash calculation", ) .hidden(hidden_unless_forced()); + + let accounts_db_enable_accumulate_account_hash_calculation = + Arg::with_name("enable_accumulate_account_hash_calculation") + .long("enable-accumulate-account-hash-calculation") + .help("Enable accumulate account hash calculation") + .hidden(hidden_unless_forced()); + let account_paths_arg = Arg::with_name("account_paths") .long("accounts") .value_name("PATHS") @@ -947,6 +954,7 @@ fn main() { .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&use_snapshot_archives_at_startup), ) .subcommand( @@ -960,6 +968,7 @@ fn main() { .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&use_snapshot_archives_at_startup), ) .subcommand( @@ -977,6 +986,7 @@ fn main() { .arg(&accountsdb_skip_shrink) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + 
.arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&verify_index_arg) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&ancient_append_vecs) @@ -1096,6 +1106,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) @@ -1137,6 +1148,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&accountsdb_skip_shrink) .arg(&ancient_append_vecs) @@ -1354,6 +1366,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) @@ -1415,6 +1428,7 @@ fn main() { .arg(&disable_disk_index) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) + .arg(&accounts_db_enable_accumulate_account_hash_calculation) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&halt_at_slot_arg) .arg(&hard_forks_arg) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index cfcd32ef7ff456..630520b0646fcc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -46,6 +46,7 @@ use { epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, + lthash_cache::LTHashCache, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, stake_account::StakeAccount, @@ -72,10 +73,12 @@ use { accounts::{AccountAddressFilter, Accounts, 
PubkeyAccountSlot}, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, - CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, + AccountsDeltaHashCalculationOutput, CalcAccountsHashDataSource, LTHashCacheStat, + LTHashCacheValue, VerifyAccountsHashAndLamportsConfig, }, accounts_hash::{ - AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, + AccountHash, AccountLTHash, AccountsHash, CalcAccountsHashConfig, HashStats, + IncrementalAccountsHash, }, accounts_index::{AccountSecondaryIndexes, IndexKey, ScanConfig, ScanResult, ZeroLamport}, accounts_partition::{self, Partition, PartitionIndex}, @@ -514,6 +517,8 @@ impl PartialEq for Bank { return true; } let Self { + accumulated_accounts_hash: _, + lt_hash_cache: _, skipped_rewrites: _, rc: _, status_cache: _, @@ -830,6 +835,10 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, + pub accumulated_accounts_hash: RwLock>, + + pub lt_hash_cache: LTHashCache, + epoch_reward_status: EpochRewardStatus, transaction_processor: TransactionBatchProcessor, @@ -961,6 +970,8 @@ pub(super) enum RewardInterval { impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { + accumulated_accounts_hash: RwLock::default(), + lt_hash_cache: LTHashCache::default(), skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: None, rc: BankRc::new(accounts, Slot::default()), @@ -1272,6 +1283,20 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Self { + accumulated_accounts_hash: RwLock::new( + *parent.accumulated_accounts_hash.read().unwrap(), + ), + + lt_hash_cache: LTHashCache { + // Start this slot's old written accounts with the accounts + // written in the last slot. 
many accounts are written every + // slot (like votes) + written_accounts_before: RwLock::new(std::mem::take( + &mut parent.lt_hash_cache.written_accounts_after.write().unwrap(), + )), + ..LTHashCache::default() + }, + skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: None, rc, @@ -1347,6 +1372,18 @@ impl Bank { collector_fee_details: RwLock::new(CollectorFeeDetails::default()), }; + if new.accumulated_accounts_hash.read().unwrap().is_none() { + info!("start computing lt_hash {}", new.slot); + let lt_hash = new + .rc + .accounts + .accounts_db + .calculate_accounts_lt_hash_from_index(None, Some(new.slot)); + let mut w_lt_hash = new.accumulated_accounts_hash.write().unwrap(); + *w_lt_hash = Some(lt_hash); + info!("finish computing lt_hash {} {}", new.slot, lt_hash); + } + new.transaction_processor = TransactionBatchProcessor::new( new.slot, new.epoch, @@ -1834,6 +1871,9 @@ impl Bank { ); let stakes_accounts_load_duration = now.elapsed(); let mut bank = Self { + // todo: this has to be saved and loaded in bank persistence somehow + accumulated_accounts_hash: RwLock::default(), + lt_hash_cache: LTHashCache::default(), skipped_rewrites: Mutex::default(), incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rc: bank_rc, @@ -6423,7 +6463,12 @@ impl Bank { let ignore = (!self.is_partitioned_rewards_feature_enabled() && self.force_partition_rewards_in_first_block_of_epoch()) .then_some(sysvar::epoch_rewards::id()); - let accounts_delta_hash = self + + let mut delta_hash_timer = Measure::start("delta_hash_compute"); + let AccountsDeltaHashCalculationOutput { + delta_hash: accounts_delta_hash, + accounts: _pubkeys, + } = self .rc .accounts .accounts_db @@ -6432,6 +6477,27 @@ impl Bank { ignore, self.skipped_rewrites.lock().unwrap().clone(), ); + delta_hash_timer.stop(); + + let mut lt_hash_timer = Measure::start("lt_hash_compute"); + let lt_hash_cache_stat = self.calculate_account_lt_hash(); + lt_hash_timer.stop(); + + info!( + 
"slot_hash_time slot={} delta_hash={}us lt_hash={}us {}", + slot, + delta_hash_timer.as_us(), + lt_hash_timer.as_us(), + lt_hash_cache_stat + ); + + datapoint_info!( + "calc_accounts_hash_per_slot", + ("slot", slot, i64), + ("delta_hash_us", delta_hash_timer.as_us(), i64), + ("lt_hash_us", lt_hash_timer.as_us(), i64), + ("num_lt_hashes", lt_hash_cache_stat.after_size, i64), + ); let mut signature_count_buf = [0u8; 8]; LittleEndian::write_u64(&mut signature_count_buf[..], self.signature_count()); @@ -6467,7 +6533,7 @@ impl Bank { .get_bank_hash_stats(slot) .expect("No bank hash stats were found for this bank, that should not be possible"); info!( - "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, stats: {bank_hash_stats:?}", + "bank frozen: {slot} hash: {hash} accounts_delta: {} signature_count: {} last_blockhash: {} capitalization: {}{}, lt_hash: {:?}, stats: {bank_hash_stats:?}", accounts_delta_hash.0, self.signature_count(), self.last_blockhash(), @@ -6476,11 +6542,34 @@ impl Bank { format!(", epoch_accounts_hash: {:?}", epoch_accounts_hash.as_ref()) } else { "".to_string() - } + }, + self.accumulated_accounts_hash.read().unwrap() ); hash } + fn calculate_account_lt_hash(&self) -> LTHashCacheStat { + let slot = self.slot; + let parent_accumulated_hash = self + .parent() + .map(|bank| { + bank.accumulated_accounts_hash + .read() + .unwrap() + .unwrap_or_default() + }) + .unwrap_or_default(); + + *self.accumulated_accounts_hash.write().unwrap() = Some(parent_accumulated_hash); + self.rc.accounts.accounts_db.accumulate_accounts_lt_hash( + slot, + &self.ancestors, + &self.accumulated_accounts_hash, + &self.lt_hash_cache.written_accounts_before, + &self.lt_hash_cache.written_accounts_after, + ) + } + /// The epoch accounts hash is hashed into the bank's hash once per epoch at a predefined slot. /// Should it be included in *this* bank? 
fn should_include_epoch_accounts_hash(&self) -> bool { @@ -7700,6 +7789,15 @@ impl TransactionProcessingCallback for Bank { LoadedProgramMatchCriteria::NoCriteria } } + + fn insert_old_written_account(&self, key: &Pubkey, account: &AccountSharedData) { + // TODO: find all the places this has to happen + // Start computing LTHash for the old account state in background. + let mut old_written_accounts = self.lt_hash_cache.written_accounts_before.write().unwrap(); + old_written_accounts + .entry(*key) + .or_insert(LTHashCacheValue::Account(Box::new(account.clone()))); + } } #[cfg(feature = "dev-context-only-utils")] diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index bdacbb1304a028..06e15539d32c9a 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -14035,3 +14035,108 @@ fn test_deploy_last_epoch_slot() { let result_with_feature_enabled = bank.process_transaction(&transaction); assert_eq!(result_with_feature_enabled, Ok(())); } + +#[test] +fn test_lt_hash_on_banks() { + solana_logger::setup(); + let (genesis_config, _mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); + + // Helper fn to compute full lattice hash from accounts db. + let compute_full_lt_hash = |bank: &Arc<Bank>| { + bank.rc + .accounts + .accounts_db + .calculate_accounts_lt_hash_from_index(Some(&bank.ancestors), None) + }; + + // Test genesis bank with default system accounts. + // + // Check that lattice hash is computed after `freeze`. + let bank0 = Bank::new_for_tests(&genesis_config); + let bank0 = Arc::new(bank0); + bank0.freeze(); + let lt_hash0 = *bank0.accumulated_accounts_hash.read().unwrap(); + assert!(lt_hash0.is_some()); + + // Test bank1 with no user account update. + // In this case, it is simulating a bank with no transaction (i.e. full of + // empty ticks). + // + // 1. Check that lattice hash for bank1 is different from bank0 due to + // sysvar accounts changes. + // 2. 
Check that lattice hash computed in bank1 is the same as the full + // lattice hash computed from accounts db. + let bank1 = Arc::new(new_from_parent(bank0.clone())); + bank1.freeze(); + let lt_hash1 = *bank1.accumulated_accounts_hash.read().unwrap(); + assert_ne!(lt_hash1.unwrap(), lt_hash0.unwrap()); + assert_eq!(lt_hash1.unwrap(), compute_full_lt_hash(&bank1)); + + // Test bank2 with simulated user account update. + // + // Check that lattice hash computed in bank2 is the same as the full lattice + // hash computed from accounts db. + let bank2 = Arc::new(new_from_parent(bank1.clone())); + let key = solana_sdk::pubkey::new_rand(); + let account = AccountSharedData::new(100, 0, &system_program::id()); + bank2.store_account(&key, &account); + bank2.freeze(); + let lt_hash2 = *bank2.accumulated_accounts_hash.read().unwrap(); + assert_eq!(lt_hash2.unwrap(), compute_full_lt_hash(&bank2)); + + // Test bank3 with account overwrite. + // + // bank3 store the same account as bank2 did. But it stored the same + // `account` twice, with the final account be the same as bank2. Check that + // bank3's final lattice hash should be the same as `bank2`. + let bank3 = Arc::new(new_from_parent(bank1.clone())); + let account = AccountSharedData::new(200, 0, &system_program::id()); + bank3.store_account(&key, &account); + let account = AccountSharedData::new(100, 0, &system_program::id()); + bank3.store_account(&key, &account); + bank3.freeze(); + let lt_hash3 = *bank3.accumulated_accounts_hash.read().unwrap(); + assert_eq!(lt_hash3.unwrap(), compute_full_lt_hash(&bank3)); + assert_eq!(lt_hash3.unwrap(), lt_hash2.unwrap()); + + // Test bank4 with multiple user account update. + // + // Check that lattice hash computed in bank4 is the same as the full lattice + // hash computed from accounts db. 
+ let bank4 = Arc::new(new_from_parent(bank1.clone())); + for i in 0..10 { + let key = solana_sdk::pubkey::new_rand(); + let account = AccountSharedData::new(100 * i, 0, &system_program::id()); + bank4.store_account(&key, &account); + } + bank4.freeze(); + let lt_hash4 = *bank4.accumulated_accounts_hash.read().unwrap(); + assert_eq!(lt_hash4.unwrap(), compute_full_lt_hash(&bank4)); +} + +#[test] +fn test_lt_hash_cache_insert() { + solana_logger::setup(); + let (genesis_config, _mint_keypair) = create_genesis_config(LAMPORTS_PER_SOL); + let bank0 = Bank::new_for_tests(&genesis_config); + + let key = solana_sdk::pubkey::new_rand(); + let account = AccountSharedData::new(100, 0, &system_program::id()); + let hash = AccountLTHash::default(); + + // Put a hash value in the cache + let mut old_written_accounts = bank0.lt_hash_cache.written_accounts_before.write().unwrap(); + old_written_accounts + .entry(key) + .or_insert(LTHashCacheValue::Hash(Box::new(hash))); + drop(old_written_accounts); + + // Try to insert an account with the same key. The insert should fail and + // NOT overwrite the existing hash value in the cache. + bank0.insert_old_written_account(&key, &account); + + // Assert that the hash value is still in the cache. 
+ let old_written_accounts = bank0.lt_hash_cache.written_accounts_before.read().unwrap(); + let hash2 = old_written_accounts.get(&key).unwrap(); + assert_eq!(*hash2, LTHashCacheValue::Hash(Box::new(hash))) +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 57936c2c7e6bac..047cd2d2485d62 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -17,6 +17,7 @@ pub mod genesis_utils; pub mod inline_spl_associated_token_account; pub mod installed_scheduler_pool; pub mod loader_utils; +mod lthash_cache; pub mod non_circulating_supply; pub mod prioritization_fee; pub mod prioritization_fee_cache; diff --git a/runtime/src/lthash_cache.rs b/runtime/src/lthash_cache.rs new file mode 100644 index 00000000000000..e3c2cf37d163cd --- /dev/null +++ b/runtime/src/lthash_cache.rs @@ -0,0 +1,64 @@ +/*! +A cache for lattice hash +*/ + +use solana_accounts_db::accounts_db::LTHashCacheMap; + +/// LTHashCache - A simple cache to avoid recomputing the LTHash for accounts +/// that are written EVERY slot. +/// +/// The cache inherits all LTHashes for all accounts written in the parent slot +/// in `written_accounts_before` member, and stores the 2048 bytes of LTHashes +/// computed from all the accounts written in this slot in `written_accounts_after` +/// member. +#[derive(Default, Debug)] +pub struct LTHashCache { + /// The pubkey, lthash/account pairs that were written by the last slot. + /// MANY accounts are written EVERY slot. This avoids a re-hashing. + pub written_accounts_before: LTHashCacheMap, + + /// The pubkey, lthash pairs that were written in this slot. + pub written_accounts_after: LTHashCacheMap, +} + +#[cfg(test)] +mod tests { + use { + super::*, + solana_accounts_db::{accounts_db::LTHashCacheValue, accounts_hash::AccountLTHash}, + solana_sdk::{account::AccountSharedData, pubkey::Pubkey}, + //std::{collections::HashMap, sync::RwLock}, + }; + + #[test] + fn test_lt_hash_cache() { + // Check cache defaults to empty. 
+ let c = LTHashCache::default(); + assert!(c.written_accounts_before.read().unwrap().is_empty()); + assert!(c.written_accounts_after.read().unwrap().is_empty()); + + // Write to cache. + let k = solana_sdk::pubkey::new_rand(); + let account = AccountSharedData::new(100, 0, &Pubkey::default()); + let h = AccountLTHash::default(); + c.written_accounts_before + .write() + .unwrap() + .insert(k, LTHashCacheValue::Hash(Box::new(h))); + c.written_accounts_after + .write() + .unwrap() + .insert(k, LTHashCacheValue::Account(Box::new(account.clone()))); + assert!(!c.written_accounts_before.read().unwrap().is_empty()); + assert!(!c.written_accounts_after.read().unwrap().is_empty()); + + // Read from Cache. + let map1 = c.written_accounts_before.read().unwrap(); + let d1 = map1.get(&k).unwrap(); + assert_eq!(d1, &LTHashCacheValue::Hash(Box::new(h))); + + let map2 = c.written_accounts_after.write().unwrap(); + let d2 = map2.get(&k).unwrap(); + assert_eq!(d2, &LTHashCacheValue::Account(Box::new(account))); + } +} diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index 87c18b9717c9bc..7a291434daa3d9 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -236,6 +236,12 @@ fn load_transaction_accounts( .get_account_shared_data(key) .map(|mut account| { if message.is_writable(i) { + { + // TODO: find all the places this has to happen + // Start computing LTHash for the old account state in background. 
+ callbacks.insert_old_written_account(key, &account); + } + + if !feature_set .is_active(&feature_set::disable_rent_fees_collection::id()) { @@ -524,6 +530,8 @@ mod tests { fn get_feature_set(&self) -> Arc<FeatureSet> { self.feature_set.clone() } + + fn insert_old_written_account(&self, _key: &Pubkey, _account: &AccountSharedData) {} } fn load_accounts_with_fee_and_rent( diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 5426cf0fce9b16..bf4d26945d6797 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -107,6 +107,8 @@ pub trait TransactionProcessingCallback { fn get_program_match_criteria(&self, _program: &Pubkey) -> LoadedProgramMatchCriteria { LoadedProgramMatchCriteria::NoCriteria } + + fn insert_old_written_account(&self, key: &Pubkey, account: &AccountSharedData); } #[derive(Debug)] diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 0c123369e25451..c39328811199ff 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -46,4 +46,6 @@ impl TransactionProcessingCallback for MockBankCallback { fn get_feature_set(&self) -> Arc<FeatureSet> { self.feature_set.clone() } + + fn insert_old_written_account(&self, _key: &Pubkey, _account: &AccountSharedData) {} } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 7fc525477ef41e..737695be1c3bbb 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1408,6 +1408,13 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .conflicts_with("partitioned_epoch_rewards_compare_calculation") .hidden(hidden_unless_forced()), ) + .arg( + Arg::with_name("enable_accumulate_account_hash_calculation") + .long("enable-accumulate-account-hash-calculation") + .takes_value(false) + .help("Enable accumulate account hash calculation") + .hidden(hidden_unless_forced()), + ) .arg( Arg::with_name("accounts_index_path") .long("accounts-index-path") diff --git a/validator/src/main.rs b/validator/src/main.rs index 
56050031975a52..fa26275a9171ae 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1247,6 +1247,8 @@ pub fn main() { test_partitioned_epoch_rewards, test_skip_rewrites_but_include_in_bank_hash: matches .is_present("accounts_db_test_skip_rewrites"), + enable_accumulate_account_hash_calculation: matches + .is_present("enable_accumulate_account_hash_calculation"), ..AccountsDbConfig::default() };