diff --git a/.buildkite/scripts/build-bench.sh b/.buildkite/scripts/build-bench.sh index a19e4291bc1426..27f156c141fe03 100755 --- a/.buildkite/scripts/build-bench.sh +++ b/.buildkite/scripts/build-bench.sh @@ -22,5 +22,5 @@ EOF # shellcheck disable=SC2016 group "bench" \ - "$(build_steps "bench-part-1" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part1.sh")" \ - "$(build_steps "bench-part-2" ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/bench/part2.sh")" + "$(build_steps "bench-part-1" "ci/docker-run-default-image.sh ci/bench/part1.sh")" \ + "$(build_steps "bench-part-2" "ci/docker-run-default-image.sh ci/bench/part2.sh")" diff --git a/.buildkite/scripts/build-stable.sh b/.buildkite/scripts/build-stable.sh index e1d774e1669ab8..f20ca1db358402 100755 --- a/.buildkite/scripts/build-stable.sh +++ b/.buildkite/scripts/build-stable.sh @@ -12,7 +12,7 @@ partitions=$( cat < _**As of 2/1/24:** Max: 25,000 SOL tokens. Min: 6,250 SOL tokens_ * Theft of funds without users signature from any account @@ -118,14 +117,12 @@ _**As of 2/1/24:** Max: 25,000 SOL tokens. Min: 6,250 SOL tokens_ * Theft of funds that requires users signature - creating a vote program that drains the delegated stakes. #### Consensus/Safety Violations: -Current: $1,000,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 12,500 SOL tokens. Min: 3,125 SOL tokens_ * Consensus safety violation * Tricking a validator to accept an optimistic confirmation or rooted slot without a double vote, etc. #### Liveness / Loss of Availability: -Current: $400,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 5,000 SOL tokens. Min: 1,250 SOL tokens_ * Whereby consensus halts and requires human intervention @@ -133,19 +130,16 @@ _**As of 2/1/24:** Max: 5,000 SOL tokens. Min: 1,250 SOL tokens_ * Remote attacks that partition the network, #### DoS Attacks: -Current: $100,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 1,250 SOL tokens. Min: 315 SOL tokens_ * Remote resource exhaustion via Non-RPC protocols #### Supply Chain Attacks: -Current: $100,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 1,250 SOL tokens. Min: 315 SOL tokens_ * Non-social attacks against source code change management, automated testing, release build, release publication and release hosting infrastructure of the monorepo. #### RPC DoS/Crashes: -Current: $5,000 USD in locked SOL tokens (locked for 12 months)
_**As of 2/1/24:** Max: 65 SOL tokens. Min: 20 SOL tokens_ * RPC attacks @@ -190,6 +184,4 @@ bi = 2 ^ (R - ri) / ((2^R) - 1) ### Payment of Bug Bounties: * Bounties are currently awarded on a rolling/weekly basis and paid out within 30 days upon receipt of an invoice. -* The SOL/USD conversion rate used for payments is the market price of SOL (denominated in USD) at the end of the day the invoice is submitted by the researcher. -* The reference for this price is the Closing Price given by Coingecko.com on that date given here: https://www.coingecko.com/en/coins/solana/historical_data/usd#panel * Bug bounties that are paid out in SOL are paid to stake accounts with a lockup expiring 12 months from the date of delivery of SOL. diff --git a/account-decoder/src/parse_sysvar.rs b/account-decoder/src/parse_sysvar.rs index 3fda8e8560c623..35746949c7f9ef 100644 --- a/account-decoder/src/parse_sysvar.rs +++ b/account-decoder/src/parse_sysvar.rs @@ -9,7 +9,6 @@ use { bv::BitVec, solana_sdk::{ clock::{Clock, Epoch, Slot, UnixTimestamp}, - epoch_rewards_partition_data::EpochRewardsPartitionDataVersion, epoch_schedule::EpochSchedule, pubkey::Pubkey, rent::Rent, @@ -97,24 +96,7 @@ pub fn parse_sysvar(data: &[u8], pubkey: &Pubkey) -> Result(data) - { - let EpochRewardsPartitionDataVersion::V0(partition_data) = - epoch_rewards_partition_data; - Some(SysvarAccountType::EpochRewardsPartitionData( - UiEpochRewardsPartitionData { - version: 0, - num_partitions: partition_data.num_partitions as u64, - parent_blockhash: partition_data.parent_blockhash.to_string(), - }, - )) - } else { - None - } + None } }; parsed_account.ok_or(ParseAccountError::AccountNotParsable( @@ -138,7 +120,6 @@ pub enum SysvarAccountType { StakeHistory(Vec), LastRestartSlot(UiLastRestartSlot), EpochRewards(EpochRewards), - EpochRewardsPartitionData(UiEpochRewardsPartitionData), } #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] @@ -258,14 +239,6 @@ pub struct UiLastRestartSlot { pub 
last_restart_slot: Slot, } -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] -#[serde(rename_all = "camelCase")] -pub struct UiEpochRewardsPartitionData { - pub version: u32, - pub num_partitions: u64, - pub parent_blockhash: String, -} - #[cfg(test)] mod test { #[allow(deprecated)] diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 88d15ea72482aa..9437485e1e6533 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -14,11 +14,11 @@ use { }, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, - rent_collector::RentCollector, }, solana_measure::measure::Measure, solana_sdk::{ - genesis_config::ClusterType, pubkey::Pubkey, sysvar::epoch_schedule::EpochSchedule, + genesis_config::ClusterType, pubkey::Pubkey, rent_collector::RentCollector, + sysvar::epoch_schedule::EpochSchedule, }, std::{env, fs, path::PathBuf, sync::Arc}, }; diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 1442b4845bf604..69c24d7be75f7d 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -142,7 +142,7 @@ impl<'storage> StoredAccountMeta<'storage> { pub fn offset(&self) -> usize { match self { Self::AppendVec(av) => av.offset(), - Self::Hot(hot) => hot.index(), + Self::Hot(hot) => hot.index().0 as usize, } } diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 0c0058703503d0..9b65fc803d937e 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -6,9 +6,6 @@ use { }, accounts_index::{IndexKey, ScanConfig, ScanError, ScanResult, ZeroLamport}, ancestors::Ancestors, - nonce_info::{NonceFull, NonceInfo}, - rent_collector::RentCollector, - rent_debits::RentDebits, storable_accounts::StorableAccounts, transaction_results::TransactionExecutionResult, }, @@ -24,7 +21,9 @@ use { state::{DurableNonce, Versions as NonceVersions}, State as NonceState, }, + nonce_info::{NonceFull, NonceInfo}, 
pubkey::Pubkey, + rent_debits::RentDebits, slot_hashes::SlotHashes, transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError}, transaction_context::{IndexOfAccount, TransactionAccount}, @@ -655,18 +654,11 @@ impl Accounts { txs: &[SanitizedTransaction], res: &[TransactionExecutionResult], loaded: &mut [TransactionLoadResult], - rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, ) { - let (accounts_to_store, transactions) = self.collect_accounts_to_store( - txs, - res, - loaded, - rent_collector, - durable_nonce, - lamports_per_signature, - ); + let (accounts_to_store, transactions) = + self.collect_accounts_to_store(txs, res, loaded, durable_nonce, lamports_per_signature); self.accounts_db .store_cached_inline_update_index((slot, &accounts_to_store[..]), Some(&transactions)); } @@ -689,7 +681,6 @@ impl Accounts { txs: &'a [SanitizedTransaction], execution_results: &'a [TransactionExecutionResult], load_results: &'a mut [TransactionLoadResult], - _rent_collector: &RentCollector, durable_nonce: &DurableNonce, lamports_per_signature: u64, ) -> ( @@ -813,10 +804,7 @@ fn prepare_if_nonce_account( mod tests { use { super::*, - crate::{ - rent_collector::RentCollector, - transaction_results::{DurableNonceFee, TransactionExecutionDetails}, - }, + crate::transaction_results::{DurableNonceFee, TransactionExecutionDetails}, assert_matches::assert_matches, solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ @@ -1512,8 +1500,6 @@ mod tests { let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); - let rent_collector = RentCollector::default(); - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -1581,7 +1567,6 @@ mod tests { &txs, &execution_results, loaded.as_mut_slice(), - &rent_collector, &DurableNonce::default(), 0, ); @@ 
-1884,8 +1869,6 @@ mod tests { #[test] fn test_nonced_failure_accounts_rollback_from_pays() { - let rent_collector = RentCollector::default(); - let nonce_address = Pubkey::new_unique(); let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let from = keypair_from_seed(&[1; 32]).unwrap(); @@ -1962,7 +1945,6 @@ mod tests { &txs, &execution_results, loaded.as_mut_slice(), - &rent_collector, &durable_nonce, 0, ); @@ -1994,8 +1976,6 @@ mod tests { #[test] fn test_nonced_failure_accounts_rollback_nonce_pays() { - let rent_collector = RentCollector::default(); - let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); let nonce_address = nonce_authority.pubkey(); let from = keypair_from_seed(&[1; 32]).unwrap(); @@ -2071,7 +2051,6 @@ mod tests { &txs, &execution_results, loaded.as_mut_slice(), - &rent_collector, &durable_nonce, 0, ); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 4d37dcba060705..c89cf45e320971 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -67,7 +67,6 @@ use { partitioned_rewards::{PartitionedEpochRewardsConfig, TestPartitionedEpochRewards}, pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, - rent_collector::RentCollector, sorted_storages::SortedStorages, storable_accounts::StorableAccounts, u64_align, utils, @@ -92,6 +91,7 @@ use { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, + rent_collector::RentCollector, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, @@ -494,6 +494,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING), base_working_path: None, accounts_hash_cache_path: None, + shrink_paths: None, write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -506,6 +507,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig index: 
Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), base_working_path: None, accounts_hash_cache_path: None, + shrink_paths: None, write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -547,6 +549,7 @@ pub struct AccountsDbConfig { /// Base directory for various necessary files pub base_working_path: Option, pub accounts_hash_cache_path: Option, + pub shrink_paths: Option>, pub write_cache_limit_bytes: Option, /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) @@ -1396,7 +1399,7 @@ pub struct AccountsDb { accounts_hash_cache_path: PathBuf, - pub shrink_paths: RwLock>>, + shrink_paths: Vec, /// Directory of paths this accounts_db needs to hold/remove #[allow(dead_code)] @@ -2433,7 +2436,7 @@ impl AccountsDb { base_working_path, base_working_temp_dir, accounts_hash_cache_path, - shrink_paths: RwLock::new(None), + shrink_paths: Vec::default(), temp_paths: None, file_size: DEFAULT_FILE_SIZE, thread_pool: rayon::ThreadPoolBuilder::new() @@ -2570,6 +2573,10 @@ impl AccountsDb { new.paths = paths; new.temp_paths = Some(temp_dirs); }; + new.shrink_paths = accounts_db_config + .as_ref() + .and_then(|config| config.shrink_paths.clone()) + .unwrap_or_else(|| new.paths.clone()); new.start_background_hasher(); { @@ -2580,15 +2587,6 @@ impl AccountsDb { new } - pub fn set_shrink_paths(&self, paths: Vec) { - assert!(!paths.is_empty()); - let mut shrink_paths = self.shrink_paths.write().unwrap(); - for path in &paths { - std::fs::create_dir_all(path).expect("Create directory failed."); - } - *shrink_paths = Some(paths); - } - pub fn file_size(&self) -> u64 { self.file_size } @@ -3985,16 +3983,39 @@ impl AccountsDb { shrink_collect.alive_total_bytes as u64, shrink_collect.capacity, ) { + warn!( + "Unexpected shrink for slot {} alive {} capacity {}, \ + likely caused by a bug for calculating alive bytes.", + slot, 
shrink_collect.alive_total_bytes, shrink_collect.capacity + ); + self.shrink_stats .skipped_shrink .fetch_add(1, Ordering::Relaxed); - for pubkey in shrink_collect.unrefed_pubkeys { - if let Some(locked_entry) = self.accounts_index.get_account_read_entry(pubkey) { + + self.accounts_index.scan( + shrink_collect.unrefed_pubkeys.into_iter(), + |pubkey, _slot_refs, entry| { // pubkeys in `unrefed_pubkeys` were unref'd in `shrink_collect` above under the assumption that we would shrink everything. // Since shrink is not occurring, we need to addref the pubkeys to get the system back to the prior state since the account still exists at this slot. - locked_entry.addref(); - } - } + if let Some(entry) = entry { + entry.addref(); + } else { + // We also expect that the accounts index must contain an + // entry for `pubkey`. Log a warning for now. In future, + // we will panic when this happens. + warn!("pubkey {pubkey} in slot {slot} was NOT found in accounts index during shrink"); + datapoint_warn!( + "accounts_db-shink_pubkey_missing_from_index", + ("store_slot", slot, i64), + ("pubkey", pubkey.to_string(), String), + ) + } + AccountsIndexScanResult::OnlyKeepInMemoryIfDirty + }, + None, + true, + ); return; } @@ -4153,12 +4174,7 @@ impl AccountsDb { let shrunken_store = self .try_recycle_store(slot, aligned_total, aligned_total + 1024) .unwrap_or_else(|| { - let maybe_shrink_paths = self.shrink_paths.read().unwrap(); - let (shrink_paths, from) = maybe_shrink_paths - .as_ref() - .map(|paths| (paths, "shrink-w-path")) - .unwrap_or_else(|| (&self.paths, "shrink")); - self.create_store(slot, aligned_total, from, shrink_paths) + self.create_store(slot, aligned_total, "shrink", self.shrink_paths.as_slice()) }); self.storage.shrinking_in_progress(slot, shrunken_store) } diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 78662a04157744..cb75369d52d182 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -4,7 
+4,6 @@ use { active_stats::{ActiveStatItem, ActiveStats}, ancestors::Ancestors, pubkey_bins::PubkeyBinCalculator24, - rent_collector::RentCollector, }, bytemuck::{Pod, Zeroable}, log::*, @@ -14,6 +13,7 @@ use { solana_sdk::{ hash::{Hash, Hasher}, pubkey::Pubkey, + rent_collector::RentCollector, slot_history::Slot, sysvar::epoch_schedule::EpochSchedule, }, diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index fc389116d09b71..493bb3130a9e2d 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -1358,10 +1358,24 @@ impl + Into> AccountsIndex { self.storage.get_startup_remaining_items_to_flush_estimate() } - /// For each pubkey, find the slot list in the accounts index - /// apply 'avoid_callback_result' if specified. - /// otherwise, call `callback` - /// if 'provide_entry_in_callback' is true, populate callback with the Arc of the entry itself. + /// Scan AccountsIndex for a given iterator of Pubkeys. + /// + /// This fn takes 4 arguments. + /// - an iterator of pubkeys to scan + /// - callback fn to run for each pubkey in the accounts index + /// - avoid_callback_result. If it is Some(default), then callback is ignored and + /// default is returned instead. + /// - provide_entry_in_callback. If true, populate the ref of the Arc of the + /// index entry to `callback` fn. Otherwise, provide None. + /// + /// The `callback` fn must return `AccountsIndexScanResult`, which is + /// used to indicates whether the AccountIndex Entry should be added to + /// in-memory cache. The `callback` fn takes in 3 arguments: + /// - the first an immutable ref of the pubkey, + /// - the second an option of the SlotList and RefCount + /// - the third an option of the AccountMapEntry, which is only populated + /// when `provide_entry_in_callback` is true. Otherwise, it will be + /// None. 
pub(crate) fn scan<'a, F, I>( &self, pubkeys: I, @@ -1369,15 +1383,6 @@ impl + Into> AccountsIndex { avoid_callback_result: Option, provide_entry_in_callback: bool, ) where - // params: - // pubkey looked up - // slots_refs is Option<(slot_list, ref_count)> - // None if 'pubkey' is not in accounts index. - // slot_list: comes from accounts index for 'pubkey' - // ref_count: refcount of entry in index - // entry, if 'provide_entry_in_callback' is true - // if 'avoid_callback_result' is Some(_), then callback is NOT called - // and _ is returned as if callback were called. F: FnMut( &'a Pubkey, Option<(&SlotList, RefCount)>, diff --git a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 782abee7f2a9ff..bf91ca0d111523 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -44,6 +44,14 @@ pub mod test_utils; /// we need to add data len and align it to get the actual stored size pub const STORE_META_OVERHEAD: usize = 136; +// Ensure the STORE_META_OVERHEAD constant remains accurate +const _: () = assert!( + STORE_META_OVERHEAD + == mem::size_of::() + + mem::size_of::() + + mem::size_of::() +); + /// Returns the size this item will take to store plus possible alignment padding bytes before the next entry. /// fixed-size portion of per-account data written /// plus 'data_len', aligned to next boundary @@ -578,7 +586,11 @@ impl AppendVec { let mut offset = self.len(); let len = accounts.accounts.len(); - let mut offsets = Vec::with_capacity(len); + // Here we have `len - skip` number of accounts. The +1 extra capacity + // is for storing the aligned offset of the last entry to that is used + // to compute the StoredAccountInfo of the last entry. 
+ let offsets_len = len - skip + 1; + let mut offsets = Vec::with_capacity(offsets_len); for i in skip..len { let (account, pubkey, hash, write_version_obsolete) = accounts.get(i); let account_meta = account @@ -621,10 +633,11 @@ impl AppendVec { if offsets.is_empty() { None } else { + let mut rv = Vec::with_capacity(offsets.len()); + // The last entry in this offset needs to be the u64 aligned offset, because that's // where the *next* entry will begin to be stored. offsets.push(u64_align!(offset)); - let mut rv = Vec::with_capacity(len); for offsets in offsets.windows(2) { rv.push(StoredAccountInfo { offset: offsets[0], @@ -716,13 +729,6 @@ pub mod tests { } } - static_assertions::const_assert_eq!( - STORE_META_OVERHEAD, - std::mem::size_of::() - + std::mem::size_of::() - + std::mem::size_of::() - ); - // Hash is [u8; 32], which has no alignment static_assertions::assert_eq_align!(u64, StoredMeta, AccountMeta); diff --git a/accounts-db/src/lib.rs b/accounts-db/src/lib.rs index 74fdb8627193ee..3016c6252ac612 100644 --- a/accounts-db/src/lib.rs +++ b/accounts-db/src/lib.rs @@ -5,7 +5,6 @@ extern crate lazy_static; pub mod account_info; -pub mod account_overrides; pub mod account_storage; pub mod accounts; pub mod accounts_cache; @@ -31,12 +30,9 @@ pub mod hardened_unpack; pub mod in_mem_accounts_index; pub mod inline_spl_token; pub mod inline_spl_token_2022; -pub mod nonce_info; pub mod partitioned_rewards; mod pubkey_bins; mod read_only_accounts_cache; -pub mod rent_collector; -pub mod rent_debits; mod rolling_bit_field; pub mod secondary_index; pub mod shared_buffer_reader; @@ -44,7 +40,6 @@ pub mod sorted_storages; pub mod stake_rewards; pub mod storable_accounts; pub mod tiered_storage; -pub mod transaction_error_metrics; pub mod transaction_results; pub mod utils; mod verify_accounts_hash_in_background; diff --git a/accounts-db/src/rent_collector.rs b/accounts-db/src/rent_collector.rs deleted file mode 100644 index 1a72cac88308b3..00000000000000 --- 
a/accounts-db/src/rent_collector.rs +++ /dev/null @@ -1,532 +0,0 @@ -//! calculate and collect rent from Accounts -use solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - clock::Epoch, - epoch_schedule::EpochSchedule, - genesis_config::GenesisConfig, - incinerator, - pubkey::Pubkey, - rent::{Rent, RentDue}, -}; - -#[derive(Serialize, Deserialize, Clone, PartialEq, Debug, AbiExample)] -pub struct RentCollector { - pub epoch: Epoch, - pub epoch_schedule: EpochSchedule, - pub slots_per_year: f64, - pub rent: Rent, -} - -impl Default for RentCollector { - fn default() -> Self { - Self { - epoch: Epoch::default(), - epoch_schedule: EpochSchedule::default(), - // derive default value using GenesisConfig::default() - slots_per_year: GenesisConfig::default().slots_per_year(), - rent: Rent::default(), - } - } -} - -/// When rent is collected from an exempt account, rent_epoch is set to this -/// value. The idea is to have a fixed, consistent value for rent_epoch for all accounts that do not collect rent. -/// This enables us to get rid of the field completely. 
-pub const RENT_EXEMPT_RENT_EPOCH: Epoch = Epoch::MAX; - -/// when rent is collected for this account, this is the action to apply to the account -#[derive(Debug)] -enum RentResult { - /// this account will never have rent collected from it - Exempt, - /// maybe we collect rent later, but not now - NoRentCollectionNow, - /// collect rent - CollectRent { - new_rent_epoch: Epoch, - rent_due: u64, // lamports, could be 0 - }, -} - -impl RentCollector { - pub fn new( - epoch: Epoch, - epoch_schedule: EpochSchedule, - slots_per_year: f64, - rent: Rent, - ) -> Self { - Self { - epoch, - epoch_schedule, - slots_per_year, - rent, - } - } - - pub fn clone_with_epoch(&self, epoch: Epoch) -> Self { - Self { - epoch, - ..self.clone() - } - } - - /// true if it is easy to determine this account should consider having rent collected from it - pub fn should_collect_rent(&self, address: &Pubkey, account: &impl ReadableAccount) -> bool { - !(account.executable() // executable accounts must be rent-exempt balance - || *address == incinerator::id()) - } - - /// given an account that 'should_collect_rent' - /// returns (amount rent due, is_exempt_from_rent) - pub fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue { - if self - .rent - .is_exempt(account.lamports(), account.data().len()) - { - RentDue::Exempt - } else { - let account_rent_epoch = account.rent_epoch(); - let slots_elapsed: u64 = (account_rent_epoch..=self.epoch) - .map(|epoch| self.epoch_schedule.get_slots_in_epoch(epoch + 1)) - .sum(); - - // avoid infinite rent in rust 1.45 - let years_elapsed = if self.slots_per_year != 0.0 { - slots_elapsed as f64 / self.slots_per_year - } else { - 0.0 - }; - - // we know this account is not exempt - let due = self.rent.due_amount(account.data().len(), years_elapsed); - RentDue::Paying(due) - } - } - - // Updates the account's lamports and status, and returns the amount of rent collected, if any. - // This is NOT thread safe at some level. 
If we try to collect from the same account in - // parallel, we may collect twice. - #[must_use = "add to Bank::collected_rent"] - pub fn collect_from_existing_account( - &self, - address: &Pubkey, - account: &mut AccountSharedData, - set_exempt_rent_epoch_max: bool, - ) -> CollectedInfo { - match self.calculate_rent_result(address, account) { - RentResult::Exempt => { - if set_exempt_rent_epoch_max { - account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - CollectedInfo::default() - } - RentResult::NoRentCollectionNow => CollectedInfo::default(), - RentResult::CollectRent { - new_rent_epoch, - rent_due, - } => match account.lamports().checked_sub(rent_due) { - None | Some(0) => { - let account = std::mem::take(account); - CollectedInfo { - rent_amount: account.lamports(), - account_data_len_reclaimed: account.data().len() as u64, - } - } - Some(lamports) => { - account.set_lamports(lamports); - account.set_rent_epoch(new_rent_epoch); - CollectedInfo { - rent_amount: rent_due, - account_data_len_reclaimed: 0u64, - } - } - }, - } - } - - /// determine what should happen to collect rent from this account - #[must_use] - fn calculate_rent_result( - &self, - address: &Pubkey, - account: &impl ReadableAccount, - ) -> RentResult { - if account.rent_epoch() == RENT_EXEMPT_RENT_EPOCH || account.rent_epoch() > self.epoch { - // potentially rent paying account (or known and already marked exempt) - // Maybe collect rent later, leave account alone for now. - return RentResult::NoRentCollectionNow; - } - if !self.should_collect_rent(address, account) { - // easy to determine this account should not consider having rent collected from it - return RentResult::Exempt; - } - match self.get_rent_due(account) { - // account will not have rent collected ever - RentDue::Exempt => RentResult::Exempt, - // potentially rent paying account - // Maybe collect rent later, leave account alone for now. 
- RentDue::Paying(0) => RentResult::NoRentCollectionNow, - // Rent is collected for next epoch. - RentDue::Paying(rent_due) => RentResult::CollectRent { - new_rent_epoch: self.epoch + 1, - rent_due, - }, - } - } -} - -/// Information computed during rent collection -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] -pub struct CollectedInfo { - /// Amount of rent collected from account - pub rent_amount: u64, - /// Size of data reclaimed from account (happens when account's lamports go to zero) - pub account_data_len_reclaimed: u64, -} - -impl std::ops::Add for CollectedInfo { - type Output = Self; - fn add(self, other: Self) -> Self { - Self { - rent_amount: self.rent_amount + other.rent_amount, - account_data_len_reclaimed: self.account_data_len_reclaimed - + other.account_data_len_reclaimed, - } - } -} - -impl std::ops::AddAssign for CollectedInfo { - fn add_assign(&mut self, other: Self) { - *self = *self + other; - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - assert_matches::assert_matches, - solana_sdk::{account::Account, sysvar}, - }; - - fn default_rent_collector_clone_with_epoch(epoch: Epoch) -> RentCollector { - RentCollector::default().clone_with_epoch(epoch) - } - - impl RentCollector { - #[must_use = "add to Bank::collected_rent"] - fn collect_from_created_account( - &self, - address: &Pubkey, - account: &mut AccountSharedData, - set_exempt_rent_epoch_max: bool, - ) -> CollectedInfo { - // initialize rent_epoch as created at this epoch - account.set_rent_epoch(self.epoch); - self.collect_from_existing_account(address, account, set_exempt_rent_epoch_max) - } - } - - #[test] - fn test_calculate_rent_result() { - for set_exempt_rent_epoch_max in [false, true] { - let mut rent_collector = RentCollector::default(); - - let mut account = AccountSharedData::default(); - assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::NoRentCollectionNow - ); - { - let mut account_clone = account.clone(); - 
assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account); - } - - account.set_executable(true); - assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::Exempt - ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } - - account.set_executable(false); - assert_matches!( - rent_collector.calculate_rent_result(&incinerator::id(), &account), - RentResult::Exempt - ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &incinerator::id(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } - - // try a few combinations of rent collector rent epoch and collecting rent - for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { - rent_collector.epoch = rent_epoch; - account.set_lamports(10); - account.set_rent_epoch(1); - let new_rent_epoch_expected = rent_collector.epoch + 1; - assert!( - matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, - ), - "{:?}", - rent_collector.calculate_rent_result(&Pubkey::default(), &account) - ); - - { - let mut account_clone = account.clone(); - assert_eq!( 
- rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo { - rent_amount: rent_due_expected, - account_data_len_reclaimed: 0 - } - ); - let mut account_expected = account.clone(); - account_expected.set_lamports(account.lamports() - rent_due_expected); - account_expected.set_rent_epoch(new_rent_epoch_expected); - assert_eq!(account_clone, account_expected); - } - } - - // enough lamports to make us exempt - account.set_lamports(1_000_000); - let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); - assert!( - matches!(result, RentResult::Exempt), - "{result:?}, set_exempt_rent_epoch_max: {set_exempt_rent_epoch_max}", - ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } - - // enough lamports to make us exempt - // but, our rent_epoch is set in the future, so we can't know if we are exempt yet or not. - // We don't calculate rent amount vs data if the rent_epoch is already in the future. 
- account.set_rent_epoch(1_000_000); - assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account), - RentResult::NoRentCollectionNow - ); - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account); - } - } - } - - #[test] - fn test_collect_from_account_created_and_existing() { - for set_exempt_rent_epoch_max in [false, true] { - let old_lamports = 1000; - let old_epoch = 1; - let new_epoch = 2; - - let (mut created_account, mut existing_account) = { - let account = AccountSharedData::from(Account { - lamports: old_lamports, - rent_epoch: old_epoch, - ..Account::default() - }); - - (account.clone(), account) - }; - - let rent_collector = default_rent_collector_clone_with_epoch(new_epoch); - - // collect rent on a newly-created account - let collected = rent_collector.collect_from_created_account( - &solana_sdk::pubkey::new_rand(), - &mut created_account, - set_exempt_rent_epoch_max, - ); - assert!(created_account.lamports() < old_lamports); - assert_eq!( - created_account.lamports() + collected.rent_amount, - old_lamports - ); - assert_ne!(created_account.rent_epoch(), old_epoch); - assert_eq!(collected.account_data_len_reclaimed, 0); - - // collect rent on a already-existing account - let collected = rent_collector.collect_from_existing_account( - &solana_sdk::pubkey::new_rand(), - &mut existing_account, - set_exempt_rent_epoch_max, - ); - assert!(existing_account.lamports() < old_lamports); - assert_eq!( - existing_account.lamports() + collected.rent_amount, - old_lamports - ); - assert_ne!(existing_account.rent_epoch(), old_epoch); - assert_eq!(collected.account_data_len_reclaimed, 0); - - // newly created account should be collected for less rent; thus more remaining balance - assert!(created_account.lamports() > existing_account.lamports()); 
- assert_eq!(created_account.rent_epoch(), existing_account.rent_epoch()); - } - } - - #[test] - fn test_rent_exempt_temporal_escape() { - for set_exempt_rent_epoch_max in [false, true] { - for pass in 0..2 { - let mut account = AccountSharedData::default(); - let epoch = 3; - let huge_lamports = 123_456_789_012; - let tiny_lamports = 789_012; - let pubkey = solana_sdk::pubkey::new_rand(); - - assert_eq!(account.rent_epoch(), 0); - - // create a tested rent collector - let rent_collector = default_rent_collector_clone_with_epoch(epoch); - - if pass == 0 { - account.set_lamports(huge_lamports); - // first mark account as being collected while being rent-exempt - let collected = rent_collector.collect_from_existing_account( - &pubkey, - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(account.lamports(), huge_lamports); - assert_eq!(collected, CollectedInfo::default()); - continue; - } - - // decrease the balance not to be rent-exempt - // In a real validator, it is not legal to reduce an account's lamports such that the account becomes rent paying. - // So, pass == 0 above tests the case of rent that is exempt. pass == 1 tests the case where we are rent paying. - account.set_lamports(tiny_lamports); - - // ... 
and trigger another rent collection on the same epoch and check that rent is working - let collected = rent_collector.collect_from_existing_account( - &pubkey, - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); - assert_ne!(collected, CollectedInfo::default()); - } - } - } - - #[test] - fn test_rent_exempt_sysvar() { - for set_exempt_rent_epoch_max in [false, true] { - let tiny_lamports = 1; - let mut account = AccountSharedData::default(); - account.set_owner(sysvar::id()); - account.set_lamports(tiny_lamports); - - let pubkey = solana_sdk::pubkey::new_rand(); - - assert_eq!(account.rent_epoch(), 0); - - let epoch = 3; - let rent_collector = default_rent_collector_clone_with_epoch(epoch); - - let collected = rent_collector.collect_from_existing_account( - &pubkey, - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(account.lamports(), 0); - assert_eq!(collected.rent_amount, 1); - } - } - - /// Ensure that when an account is "rent collected" away, its data len is returned. 
- #[test] - fn test_collect_cleans_up_account() { - for set_exempt_rent_epoch_max in [false, true] { - solana_logger::setup(); - let account_lamports = 1; // must be *below* rent amount - let account_data_len = 567; - let account_rent_epoch = 11; - let mut account = AccountSharedData::from(Account { - lamports: account_lamports, // <-- must be below rent-exempt amount - data: vec![u8::default(); account_data_len], - rent_epoch: account_rent_epoch, - ..Account::default() - }); - let rent_collector = default_rent_collector_clone_with_epoch(account_rent_epoch + 1); - - let collected = rent_collector.collect_from_existing_account( - &Pubkey::new_unique(), - &mut account, - set_exempt_rent_epoch_max, - ); - - assert_eq!(collected.rent_amount, account_lamports); - assert_eq!( - collected.account_data_len_reclaimed, - account_data_len as u64 - ); - assert_eq!(account, AccountSharedData::default()); - } - } -} diff --git a/accounts-db/src/stake_rewards.rs b/accounts-db/src/stake_rewards.rs index 9918c84747e465..712f2cb9957f1e 100644 --- a/accounts-db/src/stake_rewards.rs +++ b/accounts-db/src/stake_rewards.rs @@ -3,21 +3,10 @@ use { crate::storable_accounts::StorableAccounts, solana_sdk::{ - account::AccountSharedData, clock::Slot, pubkey::Pubkey, reward_type::RewardType, + account::AccountSharedData, clock::Slot, pubkey::Pubkey, reward_info::RewardInfo, }, }; -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, AbiExample, Clone, Copy)] -pub struct RewardInfo { - pub reward_type: RewardType, - /// Reward amount - pub lamports: i64, - /// Account balance in lamports after `lamports` was applied - pub post_balance: u64, - /// Vote account commission when the reward was credited, only present for voting and staking rewards - pub commission: Option, -} - #[derive(AbiExample, Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct StakeReward { pub stake_pubkey: Pubkey, @@ -94,7 +83,7 @@ impl StakeReward { Self { stake_pubkey: Pubkey::new_unique(), stake_reward_info: 
RewardInfo { - reward_type: RewardType::Staking, + reward_type: solana_sdk::reward_type::RewardType::Staking, lamports: rng.gen_range(1..200), post_balance: 0, /* unused atm */ commission: None, /* unused atm */ diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index a6a8dc5fb0471e..f0a23150e2fa70 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -20,6 +20,7 @@ use { }, error::TieredStorageError, footer::{AccountBlockFormat, AccountMetaFormat}, + hot::{HotStorageWriter, HOT_FORMAT}, index::IndexBlockFormat, owners::OwnersBlockFormat, readable::TieredStorageReader, @@ -30,14 +31,13 @@ use { path::{Path, PathBuf}, sync::OnceLock, }, - writer::TieredStorageWriter, }; pub type TieredStorageResult = Result; /// The struct that defines the formats of all building blocks of a /// TieredStorage. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] pub struct TieredStorageFormat { pub meta_entry_size: usize, pub account_meta_format: AccountMetaFormat, @@ -115,19 +115,23 @@ impl TieredStorage { )); } - let result = { - let writer = TieredStorageWriter::new(&self.path, format)?; - writer.write_accounts(accounts, skip) - }; + if format == &HOT_FORMAT { + let result = { + let writer = HotStorageWriter::new(&self.path)?; + writer.write_accounts(accounts, skip) + }; + + // panic here if self.reader.get() is not None as self.reader can only be + // None since we have passed `is_read_only()` check previously, indicating + // self.reader is not yet set. + self.reader + .set(TieredStorageReader::new_from_path(&self.path)?) + .unwrap(); - // panic here if self.reader.get() is not None as self.reader can only be - // None since we have passed `is_read_only()` check previously, indicating - // self.reader is not yet set. - self.reader - .set(TieredStorageReader::new_from_path(&self.path)?) 
- .unwrap(); + return result; + } - result + Err(TieredStorageError::UnknownFormat(self.path.to_path_buf())) } /// Returns the underlying reader of the TieredStorage. None will be @@ -156,18 +160,23 @@ impl TieredStorage { mod tests { use { super::*, - crate::account_storage::meta::{StoredMeta, StoredMetaWriteVersion}, + crate::account_storage::meta::{StoredAccountMeta, StoredMeta, StoredMetaWriteVersion}, footer::{TieredStorageFooter, TieredStorageMagicNumber}, hot::HOT_FORMAT, - solana_accounts_db::rent_collector::RENT_EXEMPT_RENT_EPOCH, + index::IndexOffset, + owners::OWNER_NO_OWNER, solana_sdk::{ account::{Account, AccountSharedData}, clock::Slot, hash::Hash, pubkey::Pubkey, + rent_collector::RENT_EXEMPT_RENT_EPOCH, system_instruction::MAX_PERMITTED_DATA_LENGTH, }, - std::mem::ManuallyDrop, + std::{ + collections::{HashMap, HashSet}, + mem::ManuallyDrop, + }, tempfile::tempdir, }; @@ -201,6 +210,7 @@ mod tests { Err(TieredStorageError::AttemptToUpdateReadOnly(_)), ) => {} (Err(TieredStorageError::Unsupported()), Err(TieredStorageError::Unsupported())) => {} + (Ok(_), Ok(_)) => {} // we don't expect error type mis-match or other error types here _ => { panic!("actual: {result:?}, expected: {expected_result:?}"); @@ -229,10 +239,7 @@ mod tests { assert_eq!(tiered_storage.path(), tiered_storage_path); assert_eq!(tiered_storage.file_size().unwrap(), 0); - // Expect the result to be TieredStorageError::Unsupported as the feature - // is not yet fully supported, but we can still check its partial results - // in the test. 
- write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); } let tiered_storage_readonly = TieredStorage::new_readonly(&tiered_storage_path).unwrap(); @@ -257,10 +264,7 @@ mod tests { let tiered_storage_path = temp_dir.path().join("test_write_accounts_twice"); let tiered_storage = TieredStorage::new_writable(&tiered_storage_path); - // Expect the result to be TieredStorageError::Unsupported as the feature - // is not yet fully supported, but we can still check its partial results - // in the test. - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); // Expect AttemptToUpdateReadOnly error as write_accounts can only // be invoked once. write_zero_accounts( @@ -278,7 +282,7 @@ mod tests { let tiered_storage_path = temp_dir.path().join("test_remove_on_drop"); { let tiered_storage = TieredStorage::new_writable(&tiered_storage_path); - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); } // expect the file does not exists as it has been removed on drop assert!(!tiered_storage_path.try_exists().unwrap()); @@ -286,7 +290,7 @@ mod tests { { let tiered_storage = ManuallyDrop::new(TieredStorage::new_writable(&tiered_storage_path)); - write_zero_accounts(&tiered_storage, Err(TieredStorageError::Unsupported())); + write_zero_accounts(&tiered_storage, Ok(vec![])); } // expect the file exists as we have ManuallyDrop this time. 
assert!(tiered_storage_path.try_exists().unwrap()); @@ -329,6 +333,35 @@ mod tests { (stored_meta, AccountSharedData::from(account)) } + fn verify_account( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + account_hash: &AccountHash, + ) { + let (lamports, owner, data, executable, account_hash) = account + .map(|acc| { + ( + acc.lamports(), + acc.owner(), + acc.data(), + acc.executable(), + // only persist rent_epoch for those rent-paying accounts + Some(*account_hash), + ) + }) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); + + assert_eq!(stored_meta.lamports(), lamports); + assert_eq!(stored_meta.data().len(), data.len()); + assert_eq!(stored_meta.data(), data); + assert_eq!(stored_meta.executable(), executable); + assert_eq!(stored_meta.owner(), owner); + assert_eq!( + *stored_meta.hash(), + account_hash.unwrap_or(AccountHash(Hash::default())) + ); + } + /// The helper function for all write_accounts tests. /// Currently only supports hot accounts. fn do_test_write_accounts( @@ -368,34 +401,27 @@ mod tests { let tiered_storage = TieredStorage::new_writable(tiered_storage_path); _ = tiered_storage.write_accounts(&storable_accounts, 0, &format); - verify_hot_storage(&tiered_storage, &accounts, format); - } - - /// Verify the generated tiered storage in the test. 
- fn verify_hot_storage( - tiered_storage: &TieredStorage, - expected_accounts: &[(StoredMeta, AccountSharedData)], - expected_format: TieredStorageFormat, - ) { let reader = tiered_storage.reader().unwrap(); - assert_eq!(reader.num_accounts(), expected_accounts.len()); - - let footer = reader.footer(); - let expected_footer = TieredStorageFooter { - account_meta_format: expected_format.account_meta_format, - owners_block_format: expected_format.owners_block_format, - index_block_format: expected_format.index_block_format, - account_block_format: expected_format.account_block_format, - account_entry_count: expected_accounts.len() as u32, - // Hash is not yet implemented, so we bypass the check - hash: footer.hash, - ..TieredStorageFooter::default() - }; + let num_accounts = storable_accounts.len(); + assert_eq!(reader.num_accounts(), num_accounts); - // TODO(yhchiang): verify account meta and data once the reader side - // is implemented in a separate PR. + let mut expected_accounts_map = HashMap::new(); + for i in 0..num_accounts { + let (account, address, account_hash, _write_version) = storable_accounts.get(i); + expected_accounts_map.insert(address, (account, account_hash)); + } - assert_eq!(*footer, expected_footer); + let mut index_offset = IndexOffset(0); + let mut verified_accounts = HashSet::new(); + while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() { + if let Some((account, account_hash)) = expected_accounts_map.get(stored_meta.pubkey()) { + verify_account(&stored_meta, *account, account_hash); + verified_accounts.insert(stored_meta.pubkey()); + } + index_offset = next; + } + assert!(!verified_accounts.is_empty()); + assert_eq!(verified_accounts.len(), expected_accounts_map.len()) } #[test] diff --git a/accounts-db/src/tiered_storage/error.rs b/accounts-db/src/tiered_storage/error.rs index e0c8ffa5ca482d..145334574b4ea3 100644 --- a/accounts-db/src/tiered_storage/error.rs +++ b/accounts-db/src/tiered_storage/error.rs @@ -11,7 
+11,7 @@ pub enum TieredStorageError { #[error("AttemptToUpdateReadOnly: attempted to update read-only file {0}")] AttemptToUpdateReadOnly(PathBuf), - #[error("UnknownFormat: the tiered storage format is unavailable for file {0}")] + #[error("UnknownFormat: the tiered storage format is unknown for file {0}")] UnknownFormat(PathBuf), #[error("Unsupported: the feature is not yet supported")] diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 311da9916785f6..7db9e90d65d353 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -2,10 +2,9 @@ use { crate::{ - account_storage::meta::StoredAccountMeta, + account_storage::meta::{StoredAccountInfo, StoredAccountMeta}, accounts_file::MatchAccountOwnerError, accounts_hash::AccountHash, - rent_collector::RENT_EXEMPT_RENT_EPOCH, tiered_storage::{ byte_block, file::TieredStorageFile, @@ -22,7 +21,10 @@ use { bytemuck::{Pod, Zeroable}, memmap2::{Mmap, MmapOptions}, modular_bitfield::prelude::*, - solana_sdk::{account::ReadableAccount, pubkey::Pubkey, stake_history::Epoch}, + solana_sdk::{ + account::ReadableAccount, pubkey::Pubkey, rent_collector::RENT_EXEMPT_RENT_EPOCH, + stake_history::Epoch, + }, std::{borrow::Borrow, fs::OpenOptions, option::Option, path::Path}, }; @@ -435,7 +437,7 @@ impl HotStorageReader { pub fn get_account( &self, index_offset: IndexOffset, - ) -> TieredStorageResult, usize)>> { + ) -> TieredStorageResult, IndexOffset)>> { if index_offset.0 >= self.footer.account_entry_count { return Ok(None); } @@ -452,12 +454,30 @@ impl HotStorageReader { meta, address, owner, - index: index_offset.0 as usize, + index: index_offset, account_block, }), - index_offset.0.saturating_add(1) as usize, + IndexOffset(index_offset.0.saturating_add(1)), ))) } + + /// Return a vector of account metadata for each account, starting from + /// `index_offset` + pub fn accounts( + &self, + mut index_offset: IndexOffset, + ) -> TieredStorageResult> 
{ + let mut accounts = Vec::with_capacity( + self.footer + .account_entry_count + .saturating_sub(index_offset.0) as usize, + ); + while let Some((account, next)) = self.get_account(index_offset)? { + accounts.push(account); + index_offset = next; + } + Ok(accounts) + } } fn write_optional_fields( @@ -543,7 +563,7 @@ impl HotStorageWriter { &self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, skip: usize, - ) -> TieredStorageResult<()> { + ) -> TieredStorageResult> { let mut footer = new_hot_footer(); let mut index = vec![]; let mut owners_table = OwnersTable::default(); @@ -551,6 +571,8 @@ impl HotStorageWriter { // writing accounts blocks let len = accounts.accounts.len(); + let total_input_accounts = len - skip; + let mut stored_infos = Vec::with_capacity(total_input_accounts); for i in skip..len { let (account, address, account_hash, _write_version) = accounts.get(i); let index_entry = AccountIndexWriterEntry { @@ -574,7 +596,7 @@ impl HotStorageWriter { }) .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None, None)); let owner_offset = owners_table.insert(owner); - cursor += self.write_account( + let stored_size = self.write_account( lamports, owner_offset, data, @@ -582,9 +604,25 @@ impl HotStorageWriter { rent_epoch, account_hash, )?; + cursor += stored_size; + + stored_infos.push(StoredAccountInfo { + // Here we pass the IndexOffset as the get_account() API + // takes IndexOffset. Given the account address is also + // maintained outside the TieredStorage, a potential optimization + // is to store AccountOffset instead, which can further save + // one jump from the index block to the accounts block. + offset: index.len(), + // Here we only include the stored size that the account directly + // contribute (i.e., account entry + index entry that include the + // account meta, data, optional fields, its address, and AccountOffset). + // Storage size from those shared blocks like footer and owners block + // is not included. 
+ size: stored_size + footer.index_block_format.entry_size::(), + }); index.push(index_entry); } - footer.account_entry_count = (len - skip) as u32; + footer.account_entry_count = total_input_accounts as u32; // writing index block // expect the offset of each block aligned. @@ -611,7 +649,7 @@ impl HotStorageWriter { footer.write_footer_block(&self.storage)?; - Ok(()) + Ok(stored_infos) } } @@ -1226,7 +1264,7 @@ pub mod tests { ); assert_eq!(*stored_meta.pubkey(), addresses[i]); - assert_eq!(i + 1, next); + assert_eq!(i + 1, next.0 as usize); } // Make sure it returns None on NUM_ACCOUNTS to allow termination on // while loop in actual accounts-db read case. @@ -1280,6 +1318,37 @@ pub mod tests { (stored_meta, AccountSharedData::from(account)) } + fn verify_account( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + address: &Pubkey, + account_hash: &AccountHash, + ) { + let (lamports, owner, data, executable, account_hash) = account + .map(|acc| { + ( + acc.lamports(), + acc.owner(), + acc.data(), + acc.executable(), + // only persist rent_epoch for those rent-paying accounts + Some(*account_hash), + ) + }) + .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); + + assert_eq!(stored_meta.lamports(), lamports); + assert_eq!(stored_meta.data().len(), data.len()); + assert_eq!(stored_meta.data(), data); + assert_eq!(stored_meta.executable(), executable); + assert_eq!(stored_meta.owner(), owner); + assert_eq!(stored_meta.pubkey(), address); + assert_eq!( + *stored_meta.hash(), + account_hash.unwrap_or(AccountHash(Hash::default())) + ); + } + #[test] fn test_write_account_and_index_blocks() { let account_data_sizes = &[ @@ -1316,11 +1385,10 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path().join("test_write_account_and_index_blocks"); - - { + let stored_infos = { let writer = HotStorageWriter::new(&path).unwrap(); - writer.write_accounts(&storable_accounts, 0).unwrap(); - } + 
writer.write_accounts(&storable_accounts, 0).unwrap() + }; let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1333,31 +1401,9 @@ pub mod tests { .unwrap(); let (account, address, account_hash, _write_version) = storable_accounts.get(i); - let (lamports, owner, data, executable, account_hash) = account - .map(|acc| { - ( - acc.lamports(), - acc.owner(), - acc.data(), - acc.executable(), - // only persist rent_epoch for those rent-paying accounts - Some(*account_hash), - ) - }) - .unwrap_or((0, &OWNER_NO_OWNER, &[], false, None)); - - assert_eq!(stored_meta.lamports(), lamports); - assert_eq!(stored_meta.data().len(), data.len()); - assert_eq!(stored_meta.data(), data); - assert_eq!(stored_meta.executable(), executable); - assert_eq!(stored_meta.owner(), owner); - assert_eq!(stored_meta.pubkey(), address); - assert_eq!( - *stored_meta.hash(), - account_hash.unwrap_or(AccountHash(Hash::default())) - ); + verify_account(&stored_meta, account, address, account_hash); - assert_eq!(i + 1, next); + assert_eq!(i + 1, next.0 as usize); } // Make sure it returns None on NUM_ACCOUNTS to allow termination on // while loop in actual accounts-db read case. 
@@ -1365,5 +1411,32 @@ pub mod tests { hot_storage.get_account(IndexOffset(num_accounts as u32)), Ok(None) ); + + for stored_info in stored_infos { + let (stored_meta, _) = hot_storage + .get_account(IndexOffset(stored_info.offset as u32)) + .unwrap() + .unwrap(); + + let (account, address, account_hash, _write_version) = + storable_accounts.get(stored_info.offset); + verify_account(&stored_meta, account, address, account_hash); + } + + // verify get_accounts + let accounts = hot_storage.accounts(IndexOffset(0)).unwrap(); + + // first, we verify everything + for (i, stored_meta) in accounts.iter().enumerate() { + let (account, address, account_hash, _write_version) = storable_accounts.get(i); + verify_account(stored_meta, account, address, account_hash); + } + + // second, we verify various initial position + let total_stored_accounts = accounts.len(); + for i in 0..total_stored_accounts { + let partial_accounts = hot_storage.accounts(IndexOffset(i as u32)).unwrap(); + assert_eq!(&partial_accounts, &accounts[i..]); + } } } diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index 647c78d5ca91c1..12c4a8224d48ea 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -25,7 +25,7 @@ pub struct TieredReadableAccount<'accounts_file, M: TieredAccountMeta> { /// The address of the account owner pub owner: &'accounts_file Pubkey, /// The index for accessing the account inside its belonging AccountsFile - pub index: usize, + pub index: IndexOffset, /// The account block that contains this account. Note that this account /// block may be shared with other accounts. pub account_block: &'accounts_file [u8], @@ -43,7 +43,7 @@ impl<'accounts_file, M: TieredAccountMeta> TieredReadableAccount<'accounts_file, } /// Returns the index to this account in its AccountsFile. 
- pub fn index(&self) -> usize { + pub fn index(&self) -> IndexOffset { self.index } @@ -118,10 +118,10 @@ impl TieredStorageReader { /// Returns the account located at the specified index offset. pub fn get_account( &self, - index_offset: u32, - ) -> TieredStorageResult, usize)>> { + index_offset: IndexOffset, + ) -> TieredStorageResult, IndexOffset)>> { match self { - Self::Hot(hot) => hot.get_account(IndexOffset(index_offset)), + Self::Hot(hot) => hot.get_account(index_offset), } } @@ -136,16 +136,27 @@ impl TieredStorageReader { /// causes a data overrun. pub fn account_matches_owners( &self, - index_offset: u32, + index_offset: IndexOffset, owners: &[Pubkey], ) -> Result { match self { Self::Hot(hot) => { let account_offset = hot - .get_account_offset(IndexOffset(index_offset)) + .get_account_offset(index_offset) .map_err(|_| MatchAccountOwnerError::UnableToLoad)?; hot.account_matches_owners(account_offset, owners) } } } + + /// Return a vector of account metadata for each account, starting from + /// `index_offset` + pub fn accounts( + &self, + index_offset: IndexOffset, + ) -> TieredStorageResult> { + match self { + Self::Hot(hot) => hot.accounts(index_offset), + } + } } diff --git a/accounts-db/src/transaction_results.rs b/accounts-db/src/transaction_results.rs index bc0a330f507399..d213d7dab264e0 100644 --- a/accounts-db/src/transaction_results.rs +++ b/accounts-db/src/transaction_results.rs @@ -5,20 +5,15 @@ )] pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList}; use { - crate::{ - nonce_info::{NonceFull, NonceInfo, NoncePartial}, - rent_debits::RentDebits, - }, solana_program_runtime::loaded_programs::LoadedProgramsForTxBatch, solana_sdk::{ - instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, + nonce_info::{NonceFull, NonceInfo}, + rent_debits::RentDebits, transaction::{self, TransactionError}, - transaction_context::{TransactionContext, TransactionReturnData}, + transaction_context::TransactionReturnData, }, 
}; -pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); - pub struct TransactionResults { pub fee_collection_results: Vec>, pub execution_results: Vec, @@ -110,109 +105,3 @@ impl DurableNonceFee { } } } - -/// Extract the InnerInstructionsList from a TransactionContext -pub fn inner_instructions_list_from_instruction_trace( - transaction_context: &TransactionContext, -) -> InnerInstructionsList { - debug_assert!(transaction_context - .get_instruction_context_at_index_in_trace(0) - .map(|instruction_context| instruction_context.get_stack_height() - == TRANSACTION_LEVEL_STACK_HEIGHT) - .unwrap_or(true)); - let mut outer_instructions = Vec::new(); - for index_in_trace in 0..transaction_context.get_instruction_trace_length() { - if let Ok(instruction_context) = - transaction_context.get_instruction_context_at_index_in_trace(index_in_trace) - { - let stack_height = instruction_context.get_stack_height(); - if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT { - outer_instructions.push(Vec::new()); - } else if let Some(inner_instructions) = outer_instructions.last_mut() { - let stack_height = u8::try_from(stack_height).unwrap_or(u8::MAX); - let instruction = CompiledInstruction::new_from_raw_parts( - instruction_context - .get_index_of_program_account_in_transaction( - instruction_context - .get_number_of_program_accounts() - .saturating_sub(1), - ) - .unwrap_or_default() as u8, - instruction_context.get_instruction_data().to_vec(), - (0..instruction_context.get_number_of_instruction_accounts()) - .map(|instruction_account_index| { - instruction_context - .get_index_of_instruction_account_in_transaction( - instruction_account_index, - ) - .unwrap_or_default() as u8 - }) - .collect(), - ); - inner_instructions.push(InnerInstruction { - instruction, - stack_height, - }); - } else { - debug_assert!(false); - } - } else { - debug_assert!(false); - } - } - outer_instructions -} - -#[cfg(test)] -mod tests { - use { - super::*, - 
solana_sdk::{sysvar::rent::Rent, transaction_context::TransactionContext}, - }; - - #[test] - fn test_inner_instructions_list_from_instruction_trace() { - let instruction_trace = [1, 2, 1, 1, 2, 3, 2]; - let mut transaction_context = - TransactionContext::new(vec![], Rent::default(), 3, instruction_trace.len()); - for (index_in_trace, stack_height) in instruction_trace.into_iter().enumerate() { - while stack_height <= transaction_context.get_instruction_context_stack_height() { - transaction_context.pop().unwrap(); - } - if stack_height > transaction_context.get_instruction_context_stack_height() { - transaction_context - .get_next_instruction_context() - .unwrap() - .configure(&[], &[], &[index_in_trace as u8]); - transaction_context.push().unwrap(); - } - } - let inner_instructions = - inner_instructions_list_from_instruction_trace(&transaction_context); - - assert_eq!( - inner_instructions, - vec![ - vec![InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![1], vec![]), - stack_height: 2, - }], - vec![], - vec![ - InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![4], vec![]), - stack_height: 2, - }, - InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![5], vec![]), - stack_height: 3, - }, - InnerInstruction { - instruction: CompiledInstruction::new_from_raw_parts(0, vec![6], vec![]), - stack_height: 2, - }, - ] - ] - ); - } -} diff --git a/accounts-db/src/utils.rs b/accounts-db/src/utils.rs index 7a38d23b04f68a..6ac1674a30e8da 100644 --- a/accounts-db/src/utils.rs +++ b/accounts-db/src/utils.rs @@ -1,8 +1,13 @@ use { + lazy_static, log::*, + solana_measure::measure, std::{ + collections::HashSet, fs, path::{Path, PathBuf}, + sync::Mutex, + thread, }, }; @@ -54,6 +59,82 @@ pub fn create_accounts_run_and_snapshot_dirs( Ok((run_path, snapshot_path)) } +/// Moves and asynchronously deletes the contents of a directory to avoid blocking on it. 
+/// The directory is re-created after the move, and should now be empty. +pub fn move_and_async_delete_path_contents(path: impl AsRef) { + move_and_async_delete_path(&path); + // The following could fail if the rename failed. + // If that happens, the directory should be left as is. + // So we ignore errors here. + _ = std::fs::create_dir(path); +} + +/// Delete directories/files asynchronously to avoid blocking on it. +/// First, in sync context, check if the original path exists, if it +/// does, rename the original path to *_to_be_deleted. +/// If there's an in-progress deleting thread for this path, return. +/// Then spawn a thread to delete the renamed path. +pub fn move_and_async_delete_path(path: impl AsRef) { + lazy_static! { + static ref IN_PROGRESS_DELETES: Mutex> = Mutex::new(HashSet::new()); + }; + + // Grab the mutex so no new async delete threads can be spawned for this path. + let mut lock = IN_PROGRESS_DELETES.lock().unwrap(); + + // If the path does not exist, there's nothing to delete. + if !path.as_ref().exists() { + return; + } + + // If the original path (`pathbuf` here) is already being deleted, + // then the path should not be moved and deleted again. + if lock.contains(path.as_ref()) { + return; + } + + let mut path_delete = path.as_ref().to_path_buf(); + path_delete.set_file_name(format!( + "{}{}", + path_delete.file_name().unwrap().to_str().unwrap(), + "_to_be_deleted" + )); + if let Err(err) = fs::rename(&path, &path_delete) { + warn!( + "Cannot async delete, retrying in sync mode: failed to rename '{}' to '{}': {err}", + path.as_ref().display(), + path_delete.display(), + ); + // Although the delete here is synchronous, we want to prevent another thread + // from moving & deleting this directory via `move_and_async_delete_path`. 
+ lock.insert(path.as_ref().to_path_buf()); + drop(lock); // unlock before doing sync delete + + delete_contents_of_path(&path); + IN_PROGRESS_DELETES.lock().unwrap().remove(path.as_ref()); + return; + } + + lock.insert(path_delete.clone()); + drop(lock); + thread::Builder::new() + .name("solDeletePath".to_string()) + .spawn(move || { + trace!("background deleting {}...", path_delete.display()); + let (result, measure_delete) = measure!(fs::remove_dir_all(&path_delete)); + if let Err(err) = result { + panic!("Failed to async delete '{}': {err}", path_delete.display()); + } + trace!( + "background deleting {}... Done, and{measure_delete}", + path_delete.display() + ); + + IN_PROGRESS_DELETES.lock().unwrap().remove(&path_delete); + }) + .expect("spawn background delete thread"); +} + /// Delete the files and subdirectories in a directory. /// This is useful if the process does not have permission /// to delete the top level directory it might be able to diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index fb6b6f90b50233..d40273863cc7a3 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -140,9 +140,9 @@ wait_step() { } all_test_steps() { - command_step checks1 ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20 check - command_step checks2 ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-dev-context-only-utils.sh check-bins" 15 check - command_step checks3 ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-dev-context-only-utils.sh check-all-targets" 15 check + command_step checks1 "ci/docker-run-default-image.sh ci/test-checks.sh" 20 check + command_step checks2 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-bins" 15 check + command_step checks3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-all-targets" 15 check wait_step # Full test suite @@ -156,7 +156,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 + command_step doctest "ci/docker-run-default-image.sh ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -182,7 +182,7 @@ all_test_steps() { cargo-test-sbf$ \ ; then cat >> "$output_file" <<"EOF" - - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" + - command: "ci/docker-run-default-image.sh ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" @@ -226,7 +226,7 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + command_step wasm "ci/docker-run-default-image.sh ci/test-wasm.sh" 20 else annotate --style info \ "wasm skipped as no relevant files were modified" @@ -258,7 +258,7 @@ EOF ^ci/test-coverage.sh \ ^scripts/coverage.sh \ ; then - command_step coverage ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 80 + command_step coverage "ci/docker-run-default-image.sh ci/test-coverage.sh" 80 else annotate --style info --context test-coverage \ "Coverage skipped as no .rs files were modified" @@ -296,7 +296,7 @@ pull_or_push_steps() { if [ -z "$diff_other_than_version_bump" ]; then echo "Diff only contains version bump." - command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20 + command_step checks "ci/docker-run-default-image.sh ci/test-checks.sh" 20 exit 0 fi fi diff --git a/ci/buildkite-secondary.yml b/ci/buildkite-secondary.yml index c8bf7b4fd9fd57..c43c7ee449e758 100644 --- a/ci/buildkite-secondary.yml +++ b/ci/buildkite-secondary.yml @@ -3,7 +3,7 @@ # Pull requests to not run these steps. steps: - name: "cargo audit" - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/do-audit.sh" + command: "ci/docker-run-default-image.sh ci/do-audit.sh" agents: queue: "release-build" timeout_in_minutes: 10 diff --git a/ci/buildkite-solana-private.sh b/ci/buildkite-solana-private.sh index eeb087d323ee9a..70d8e4bfe4f59f 100755 --- a/ci/buildkite-solana-private.sh +++ b/ci/buildkite-solana-private.sh @@ -134,7 +134,7 @@ wait_step() { } all_test_steps() { - command_step checks ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-checks.sh" 20 + command_step checks "ci/docker-run-default-image.sh ci/test-checks.sh" 20 wait_step # Full test suite @@ -146,7 +146,7 @@ all_test_steps() { ^ci/rust-version.sh \ ^ci/test-docs.sh \ ; then - command_step doctest ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-docs.sh" 15 + command_step doctest "ci/docker-run-default-image.sh ci/test-docs.sh" 15 else annotate --style info --context test-docs \ "Docs skipped as no .rs files were modified" @@ -168,7 +168,7 @@ all_test_steps() { ^sdk/ \ ; then cat >> "$output_file" <<"EOF" - - command: ". ci/rust-version.sh; ci/docker-run.sh $$rust_stable_docker_image ci/test-stable-sbf.sh" + - command: "ci/docker-run-default-image.sh ci/test-stable-sbf.sh" name: "stable-sbf" timeout_in_minutes: 35 artifact_paths: "sbf-dumps.tar.bz2" @@ -208,7 +208,7 @@ EOF ^ci/test-stable.sh \ ^sdk/ \ ; then - command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + command_step wasm "ci/docker-run-default-image.sh ci/test-wasm.sh" 20 else annotate --style info \ "wasm skipped as no relevant files were modified" @@ -238,7 +238,7 @@ EOF ^ci/test-coverage.sh \ ^scripts/coverage.sh \ ; then - command_step coverage ". 
ci/rust-version.sh; ci/docker-run.sh \$\$rust_nightly_docker_image ci/test-coverage.sh" 80 + command_step coverage "ci/docker-run-default-image.sh ci/test-coverage.sh" 80 else annotate --style info --context test-coverage \ "Coverage skipped as no .rs files were modified" diff --git a/ci/dependabot-pr.sh b/ci/dependabot-pr.sh index 9ef6816cec5b96..91ecd5948c9a43 100755 --- a/ci/dependabot-pr.sh +++ b/ci/dependabot-pr.sh @@ -11,7 +11,7 @@ fi source ci/rust-version.sh stable -ci/docker-run.sh $rust_nightly_docker_image ci/dependabot-updater.sh +ci/docker-run-default-image.sh ci/dependabot-updater.sh if [[ $(git status --short :**/Cargo.lock | wc -l) -eq 0 ]]; then echo --- ok diff --git a/ci/docker-run-default-image.sh b/ci/docker-run-default-image.sh new file mode 100755 index 00000000000000..927167cc8ef1ac --- /dev/null +++ b/ci/docker-run-default-image.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# shellcheck disable=SC1091 +source "$here/rust-version.sh" + +"$here/docker-run.sh" "${ci_docker_image:?}" "$@" diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile deleted file mode 100644 index 60d48cc22594f4..00000000000000 --- a/ci/docker-rust-nightly/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM solanalabs/rust:1.75.0 - -ARG date -ARG GRCOV_VERSION=v0.8.18 - -RUN \ - rustup install nightly-$date && \ - rustup component add clippy --toolchain=nightly-$date && \ - rustup component add rustfmt --toolchain=nightly-$date && \ - rustup show && \ - rustc --version && \ - cargo --version && \ - # grcov - curl -LOsS "https://github.com/mozilla/grcov/releases/download/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" && \ - tar -xf grcov-x86_64-unknown-linux-musl.tar.bz2 && \ - mv ./grcov $CARGO_HOME/bin && \ - rm grcov-x86_64-unknown-linux-musl.tar.bz2 && \ - # codecov - curl -Os https://uploader.codecov.io/latest/linux/codecov && \ - chmod +x codecov && \ - mv codecov 
/usr/bin diff --git a/ci/docker-rust-nightly/README.md b/ci/docker-rust-nightly/README.md deleted file mode 100644 index f4e7931f3d8511..00000000000000 --- a/ci/docker-rust-nightly/README.md +++ /dev/null @@ -1,42 +0,0 @@ -Docker image containing rust nightly and some preinstalled crates used in CI. - -This image may be manually updated by running `CI=true ./build.sh` if you are a member -of the [Solana Labs](https://hub.docker.com/u/solanalabs/) Docker Hub -organization. - -## Moving to a newer nightly - -NOTE: Follow instructions in docker-rust/README.md before this when updating the stable -rust version as well. - -We pin the version of nightly (see the `ARG nightly=xyz` line in `Dockerfile`) -to avoid the build breaking at unexpected times, as occasionally nightly will -introduce breaking changes. - -To update the pinned version: -1. Edit `Dockerfile` to match the desired stable rust version to base on if needed. -1. Run `ci/docker-rust-nightly/build.sh` to rebuild the nightly image locally, - or potentially `ci/docker-rust-nightly/build.sh YYYY-MM-DD` if there's a - specific YYYY-MM-DD that is desired (default is today's build). - Check https://rust-lang.github.io/rustup-components-history/ for build - status -1. Update `ci/rust-version.sh` to reflect the new nightly `YYYY-MM-DD` -1. Run `SOLANA_ALLOCATE_TTY=1 SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-checks.sh` - and `SOLANA_ALLOCATE_TTY=1 SOLANA_DOCKER_RUN_NOSETUID=1 ci/docker-run.sh --nopull solanalabs/rust-nightly:YYYY-MM-DD ci/test-coverage.sh [args]...` - to confirm the new nightly image builds. Fix any issues as needed -1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. -1. Run `CI=true ci/docker-rust-nightly/build.sh YYYY-MM-DD` to push the new nightly image to dockerhub.com. -1. Send a PR with the `ci/rust-version.sh` change and any codebase adjustments needed. 
- -## Troubleshooting - -### Resource is denied - -When running `CI=true ci/docker-rust-nightly/build.sh`, you see: - -``` -denied: requested access to the resource is denied -``` - -Run `docker login` to enable pushing images to Docker Hub. Contact @mvines or @garious -to get write access. diff --git a/ci/docker-rust-nightly/build.sh b/ci/docker-rust-nightly/build.sh deleted file mode 100755 index dad7221e5dbecf..00000000000000 --- a/ci/docker-rust-nightly/build.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -ex - -cd "$(dirname "$0")" - -platform=() -if [[ $(uname -m) = arm64 ]]; then - # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr - platform+=(--platform linux/amd64) -fi - -nightlyDate=${1:-$(date +%Y-%m-%d)} -docker build "${platform[@]}" -t solanalabs/rust-nightly:"$nightlyDate" --build-arg date="$nightlyDate" . - -maybeEcho= -if [[ -z $CI ]]; then - echo "Not CI, skipping |docker push|" - maybeEcho="echo" -fi -$maybeEcho docker push solanalabs/rust-nightly:"$nightlyDate" diff --git a/ci/docker-rust/README.md b/ci/docker-rust/README.md deleted file mode 100644 index 3f818476867be3..00000000000000 --- a/ci/docker-rust/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Docker image containing rust and some preinstalled packages used in CI. - -NOTE: Recreate rust-nightly docker image after this when updating the stable rust -version! Both docker images must be updated in tandem. - -This image is manually maintained: -1. Edit `Dockerfile` to match the desired rust version -1. Run `docker login` to enable pushing images to Docker Hub, if you're authorized. -1. Run `./build.sh` to publish the new image, if you are a member of the [Solana - Labs](https://hub.docker.com/u/solanalabs/) Docker Hub organization. 
- diff --git a/ci/docker-rust/build.sh b/ci/docker-rust/build.sh deleted file mode 100755 index 360bbbcbe3bcb3..00000000000000 --- a/ci/docker-rust/build.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -set -ex - -cd "$(dirname "$0")" - - -platform=() -if [[ $(uname -m) = arm64 ]]; then - # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr - platform+=(--platform linux/amd64) -fi - -docker build "${platform[@]}" -t solanalabs/rust . - -read -r rustc version _ < <(docker run solanalabs/rust rustc --version) -[[ $rustc = rustc ]] -docker tag solanalabs/rust:latest solanalabs/rust:"$version" -docker push solanalabs/rust:"$version" -docker push solanalabs/rust:latest diff --git a/ci/docker-rust/Dockerfile b/ci/docker/Dockerfile similarity index 78% rename from ci/docker-rust/Dockerfile rename to ci/docker/Dockerfile index 227d5f55d7753b..cee80877c6db5d 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker/Dockerfile @@ -1,10 +1,12 @@ FROM ubuntu:20.04 ARG \ - RUST_VERSION=1.75.0 \ + RUST_VERSION= \ + RUST_NIGHTLY_VERSION= \ GOLANG_VERSION=1.21.3 \ NODE_MAJOR=18 \ - SCCACHE_VERSION=v0.5.4 + SCCACHE_VERSION=v0.5.4 \ + GRCOV_VERSION=v0.8.18 SHELL ["/bin/bash", "-o", "pipefail", "-c"] @@ -21,7 +23,10 @@ ENV \ CARGO_HOME=/usr/local/cargo \ PATH="$PATH:/usr/local/cargo/bin" -RUN apt-get update && \ +RUN \ + if [ -z "$RUST_VERSION" ]; then echo "ERROR: The RUST_VERSION argument is required!" && exit 1; fi && \ + if [ -z "$RUST_NIGHTLY_VERSION" ]; then echo "ERROR: The RUST_NIGHTLY_VERSION argument is required!" 
&& exit 1; fi && \ + apt-get update && \ apt-get install --no-install-recommends -y \ # basic tzdata \ @@ -65,6 +70,9 @@ RUN apt-get update && \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs/ | sh -s -- --no-modify-path --profile minimal --default-toolchain $RUST_VERSION -y && \ rustup component add rustfmt && \ rustup component add clippy && \ + rustup install $RUST_NIGHTLY_VERSION && \ + rustup component add clippy --toolchain=$RUST_NIGHTLY_VERSION && \ + rustup component add rustfmt --toolchain=$RUST_NIGHTLY_VERSION && \ rustup target add wasm32-unknown-unknown && \ cargo install cargo-audit && \ cargo install cargo-hack && \ @@ -74,6 +82,9 @@ RUN apt-get update && \ cargo install svgbob_cli && \ cargo install wasm-pack && \ cargo install rustfilt && \ + rustup show && \ + rustc --version && \ + cargo --version && \ chmod -R a+w $CARGO_HOME $RUSTUP_HOME && \ rm -rf $CARGO_HOME/registry && \ # sccache @@ -101,5 +112,14 @@ RUN apt-get update && \ chmod -R a+w /.config && \ mkdir /.npm && \ chmod -R a+w /.npm && \ + # grcov + curl -LOsS "https://github.com/mozilla/grcov/releases/download/$GRCOV_VERSION/grcov-x86_64-unknown-linux-musl.tar.bz2" && \ + tar -xf grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + mv ./grcov $CARGO_HOME/bin && \ + rm grcov-x86_64-unknown-linux-musl.tar.bz2 && \ + # codecov + curl -Os https://uploader.codecov.io/latest/linux/codecov && \ + chmod +x codecov && \ + mv codecov /usr/bin && \ # clean lists rm -rf /var/lib/apt/lists/* diff --git a/ci/docker/README.md b/ci/docker/README.md new file mode 100644 index 00000000000000..58bd9accdb14a1 --- /dev/null +++ b/ci/docker/README.md @@ -0,0 +1,11 @@ +Docker image containing rust, rust nightly and some preinstalled packages used in CI + +This image is manually maintained: + +#### CLI + +1. Edit + 1. `ci/rust-version.sh` for rust and rust nightly version + 2. `ci/docker/Dockerfile` for other packages +2. 
Ensure you're a member of the [Solana Docker Hub Organization](https://hub.docker.com/u/solanalabs/) and already `docker login` +3. Run `ci/docker/build.sh` to build/publish the new image diff --git a/ci/docker/build.sh b/ci/docker/build.sh new file mode 100755 index 00000000000000..0c20c5e928d94d --- /dev/null +++ b/ci/docker/build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -e + +here="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# shellcheck disable=SC1091 +source "$here/../rust-version.sh" + +platform=() +if [[ $(uname -m) = arm64 ]]; then + # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr + platform+=(--platform linux/amd64) +fi + +echo "build image: ${ci_docker_image:?}" +docker build "${platform[@]}" \ + -f "$here/Dockerfile" \ + --build-arg "RUST_VERSION=${rust_stable:?}" \ + --build-arg "RUST_NIGHTLY_VERSION=${rust_nightly:?}" \ + -t "$ci_docker_image" . + +docker push "$ci_docker_image" diff --git a/ci/publish-crate.sh b/ci/publish-crate.sh index 099d02129e3cb8..5d7f3b1e1e1c50 100755 --- a/ci/publish-crate.sh +++ b/ci/publish-crate.sh @@ -72,7 +72,7 @@ for Cargo_toml in $Cargo_tomls; do echo "Attempt ${i} of ${numRetries}" # The rocksdb package does not build with the stock rust docker image so use # the solana rust docker image - if ci/docker-run.sh "$rust_stable_docker_image" bash -exc "cd $crate; $cargoCommand"; then + if ci/docker-run-default-image.sh bash -exc "cd $crate; $cargoCommand"; then break fi diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 3db1a843fa105b..3321f1d5ecb6a1 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -34,10 +34,10 @@ fi export rust_stable="$stable_version" -export rust_stable_docker_image=solanalabs/rust:"$stable_version" export rust_nightly=nightly-"$nightly_version" -export rust_nightly_docker_image=solanalabs/rust-nightly:"$nightly_version" + +export ci_docker_image="solanalabs/ci:rust_${rust_stable}_${rust_nightly}" [[ -z $1 ]] || ( diff --git 
a/ci/test.sh b/ci/test.sh new file mode 100644 index 00000000000000..987f2a6cf36153 --- /dev/null +++ b/ci/test.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Get the directory of the current script +script_dir_by_bash_source=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) + +script_dir_by_0=$(cd "$(dirname "$0")" && pwd) + +echo "script_dir_by_bash_source = $script_dir_by_bash_source" +echo "script_dir_by_0 = $script_dir_by_0" diff --git a/core/Cargo.toml b/core/Cargo.toml index fa6c7cd2052aea..e2a936cdabc4c1 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -65,6 +65,7 @@ solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-turbine = { workspace = true } diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 158614b32d7963..652f2569f8fd43 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -659,7 +659,10 @@ impl BankingStage { } let (decision, make_decision_time) = measure!(decision_maker.make_consume_or_forward_decision()); - let metrics_action = slot_metrics_tracker.check_leader_slot_boundary(decision.bank_start()); + let metrics_action = slot_metrics_tracker.check_leader_slot_boundary( + decision.bank_start(), + Some(unprocessed_transaction_storage), + ); slot_metrics_tracker.increment_make_decision_us(make_decision_time.as_us()); match decision { diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index d3a53aa42e91b8..92fb07ddfab18c 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -5,10 +5,10 @@ use { scheduler_messages::{ConsumeWork, FinishedConsumeWork}, }, crossbeam_channel::{Receiver, RecvError, SendError, Sender}, - 
solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics,
     solana_poh::leader_bank_notifier::LeaderBankNotifier,
     solana_runtime::bank::Bank,
     solana_sdk::timing::AtomicInterval,
+    solana_svm::transaction_error_metrics::TransactionErrorMetrics,
     std::{
         sync::{
             atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
@@ -212,6 +212,8 @@ impl ConsumeWorkerMetrics {
             retryable_transaction_indexes,
             execute_and_commit_timings,
             error_counters,
+            min_prioritization_fees,
+            max_prioritization_fees,
             ..
         }: &ExecuteAndCommitTransactionsOutput,
     ) {
@@ -227,7 +229,20 @@ impl ConsumeWorkerMetrics {
         self.count_metrics
             .retryable_transaction_count
             .fetch_add(retryable_transaction_indexes.len(), Ordering::Relaxed);
-
+        let min_prioritization_fees = self
+            .count_metrics
+            .min_prioritization_fees
+            .fetch_min(*min_prioritization_fees, Ordering::Relaxed);
+        let max_prioritization_fees = self
+            .count_metrics
+            .max_prioritization_fees
+            .fetch_max(*max_prioritization_fees, Ordering::Relaxed);
+        self.count_metrics
+            .min_prioritization_fees
+            .fetch_min(min_prioritization_fees, Ordering::Relaxed);
+        self.count_metrics
+            .max_prioritization_fees
+            .fetch_max(max_prioritization_fees, Ordering::Relaxed);
         self.update_on_execute_and_commit_timings(execute_and_commit_timings);
         self.update_on_error_counters(error_counters);
     }
@@ -368,7 +383,6 @@ impl ConsumeWorkerMetrics {
     }
 }
 
-#[derive(Default)]
 struct ConsumeWorkerCountMetrics {
     transactions_attempted_execution_count: AtomicUsize,
     executed_transactions_count: AtomicUsize,
@@ -376,6 +390,23 @@ struct ConsumeWorkerCountMetrics {
     retryable_transaction_count: AtomicUsize,
     retryable_expired_bank_count: AtomicUsize,
     cost_model_throttled_transactions_count: AtomicUsize,
+    min_prioritization_fees: AtomicU64,
+    max_prioritization_fees: AtomicU64,
+}
+
+impl Default for ConsumeWorkerCountMetrics {
+    fn default() -> Self {
+        Self {
+            transactions_attempted_execution_count: AtomicUsize::default(),
+            executed_transactions_count: AtomicUsize::default(),
+ executed_with_successful_result_count: AtomicUsize::default(), + retryable_transaction_count: AtomicUsize::default(), + retryable_expired_bank_count: AtomicUsize::default(), + cost_model_throttled_transactions_count: AtomicUsize::default(), + min_prioritization_fees: AtomicU64::new(u64::MAX), + max_prioritization_fees: AtomicU64::default(), + } + } } impl ConsumeWorkerCountMetrics { @@ -416,6 +447,17 @@ impl ConsumeWorkerCountMetrics { .swap(0, Ordering::Relaxed), i64 ), + ( + "min_prioritization_fees", + self.min_prioritization_fees + .swap(u64::MAX, Ordering::Relaxed), + i64 + ), + ( + "max_prioritization_fees", + self.max_prioritization_fees.swap(0, Ordering::Relaxed), + i64 + ), ); } } diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index ad42da3bafbb77..938a5dd52a2549 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -9,10 +9,6 @@ use { BankingStageStats, }, itertools::Itertools, - solana_accounts_db::{ - transaction_error_metrics::TransactionErrorMetrics, - transaction_results::TransactionCheckResult, - }, solana_ledger::token_balances::collect_token_balances, solana_measure::{measure::Measure, measure_us}, solana_poh::poh_recorder::{ @@ -24,7 +20,7 @@ use { }, solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, - svm::account_loader::validate_fee_payer, + compute_budget_details::GetComputeBudgetDetails, transaction_batch::TransactionBatch, }, solana_sdk::{ @@ -35,6 +31,10 @@ use { timing::timestamp, transaction::{self, AddressLoader, SanitizedTransaction, TransactionError}, }, + solana_svm::{ + account_loader::{validate_fee_payer, TransactionCheckResult}, + transaction_error_metrics::TransactionErrorMetrics, + }, std::{ sync::{atomic::Ordering, Arc}, time::Instant, @@ -69,6 +69,8 @@ pub struct ExecuteAndCommitTransactionsOutput { pub commit_transactions_result: Result, PohRecorderError>, pub(crate) execute_and_commit_timings: LeaderExecuteAndCommitTimings, 
pub(crate) error_counters: TransactionErrorMetrics,
+    pub(crate) min_prioritization_fees: u64,
+    pub(crate) max_prioritization_fees: u64,
 }
 
 pub struct Consumer {
@@ -291,6 +293,8 @@ impl Consumer {
         let mut total_execute_and_commit_timings = LeaderExecuteAndCommitTimings::default();
         let mut total_error_counters = TransactionErrorMetrics::default();
         let mut reached_max_poh_height = false;
+        let mut overall_min_prioritization_fees: u64 = u64::MAX;
+        let mut overall_max_prioritization_fees: u64 = 0;
         while chunk_start != transactions.len() {
             let chunk_end = std::cmp::min(
                 transactions.len(),
@@ -321,6 +325,8 @@ impl Consumer {
                 commit_transactions_result: new_commit_transactions_result,
                 execute_and_commit_timings: new_execute_and_commit_timings,
                 error_counters: new_error_counters,
+                min_prioritization_fees,
+                max_prioritization_fees,
                 ..
             } = execute_and_commit_transactions_output;
 
@@ -330,6 +336,10 @@ impl Consumer {
                 total_transactions_attempted_execution_count,
                 new_transactions_attempted_execution_count
             );
+            overall_min_prioritization_fees =
+                std::cmp::min(overall_min_prioritization_fees, min_prioritization_fees);
+            overall_max_prioritization_fees =
+                std::cmp::max(overall_max_prioritization_fees, max_prioritization_fees);
 
             trace!(
                 "process_transactions result: {:?}",
@@ -390,6 +400,8 @@ impl Consumer {
             cost_model_us: total_cost_model_us,
             execute_and_commit_timings: total_execute_and_commit_timings,
             error_counters: total_error_counters,
+            min_prioritization_fees: overall_min_prioritization_fees,
+            max_prioritization_fees: overall_max_prioritization_fees,
         }
     }
 
@@ -567,6 +579,19 @@ impl Consumer {
         });
         execute_and_commit_timings.collect_balances_us = collect_balances_us;
 
+        let min_max = batch
+            .sanitized_transactions()
+            .iter()
+            .filter_map(|transaction| {
+                let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set
+                transaction
+                    .get_compute_budget_details(round_compute_unit_price_enabled)
+                    .map(|details| details.compute_unit_price)
+            })
+            .minmax();
+        let (min_prioritization_fees, max_prioritization_fees) =
+            min_max.into_option().unwrap_or_default();
+
         let (load_and_execute_transactions_output, load_execute_us) = measure_us!(bank
             .load_and_execute_transactions(
                 batch,
@@ -648,6 +673,8 @@ impl Consumer {
                 commit_transactions_result: Err(recorder_err),
                 execute_and_commit_timings,
                 error_counters,
+                min_prioritization_fees,
+                max_prioritization_fees,
             };
         }
 
@@ -703,6 +730,8 @@ impl Consumer {
             commit_transactions_result: Ok(commit_transaction_statuses),
             execute_and_commit_timings,
             error_counters,
+            min_prioritization_fees,
+            max_prioritization_fees,
         }
     }
 
diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs
index 8a9d82e32a38c0..26ede7045d3480 100644
--- a/core/src/banking_stage/immutable_deserialized_packet.rs
+++ b/core/src/banking_stage/immutable_deserialized_packet.rs
@@ -1,8 +1,6 @@
 use {
     solana_perf::packet::Packet,
-    solana_runtime::transaction_priority_details::{
-        GetTransactionPriorityDetails, TransactionPriorityDetails,
-    },
+    solana_runtime::compute_budget_details::{ComputeBudgetDetails, GetComputeBudgetDetails},
     solana_sdk::{
         feature_set,
         hash::Hash,
@@ -42,7 +40,7 @@ pub struct ImmutableDeserializedPacket {
     transaction: SanitizedVersionedTransaction,
     message_hash: Hash,
     is_simple_vote: bool,
-    priority_details: TransactionPriorityDetails,
+    compute_budget_details: ComputeBudgetDetails,
 }
 
 impl ImmutableDeserializedPacket {
@@ -54,13 +52,13 @@ impl ImmutableDeserializedPacket {
         let is_simple_vote = packet.meta().is_simple_vote_tx();
 
         // drop transaction if prioritization fails.
- let mut priority_details = sanitized_transaction - .get_transaction_priority_details(packet.meta().round_compute_unit_price()) + let mut compute_budget_details = sanitized_transaction + .get_compute_budget_details(packet.meta().round_compute_unit_price()) .ok_or(DeserializedPacketError::PrioritizationFailure)?; - // set priority to zero for vote transactions + // set compute unit price to zero for vote transactions if is_simple_vote { - priority_details.priority = 0; + compute_budget_details.compute_unit_price = 0; }; Ok(Self { @@ -68,7 +66,7 @@ impl ImmutableDeserializedPacket { transaction: sanitized_transaction, message_hash, is_simple_vote, - priority_details, + compute_budget_details, }) } @@ -88,16 +86,16 @@ impl ImmutableDeserializedPacket { self.is_simple_vote } - pub fn priority(&self) -> u64 { - self.priority_details.priority + pub fn compute_unit_price(&self) -> u64 { + self.compute_budget_details.compute_unit_price } pub fn compute_unit_limit(&self) -> u64 { - self.priority_details.compute_unit_limit + self.compute_budget_details.compute_unit_limit } - pub fn priority_details(&self) -> TransactionPriorityDetails { - self.priority_details.clone() + pub fn compute_budget_details(&self) -> ComputeBudgetDetails { + self.compute_budget_details.clone() } // This function deserializes packets into transactions, computes the blake3 hash of transaction @@ -131,7 +129,7 @@ impl PartialOrd for ImmutableDeserializedPacket { impl Ord for ImmutableDeserializedPacket { fn cmp(&self, other: &Self) -> Ordering { - self.priority().cmp(&other.priority()) + self.compute_unit_price().cmp(&other.compute_unit_price()) } } diff --git a/core/src/banking_stage/leader_slot_metrics.rs b/core/src/banking_stage/leader_slot_metrics.rs index 449ff7801991fa..88ea6b5ee340cf 100644 --- a/core/src/banking_stage/leader_slot_metrics.rs +++ b/core/src/banking_stage/leader_slot_metrics.rs @@ -1,11 +1,13 @@ use { super::{ leader_slot_timing_metrics::{LeaderExecuteAndCommitTimings, 
LeaderSlotTimingMetrics}, - unprocessed_transaction_storage::InsertPacketBatchSummary, + unprocessed_transaction_storage::{ + InsertPacketBatchSummary, UnprocessedTransactionStorage, + }, }, - solana_accounts_db::transaction_error_metrics::*, solana_poh::poh_recorder::BankStart, solana_sdk::{clock::Slot, saturating_add_assign}, + solana_svm::transaction_error_metrics::*, std::time::Instant, }; @@ -52,6 +54,53 @@ pub(crate) struct ProcessTransactionsSummary { // Breakdown of all the transaction errors from transactions passed for execution pub error_counters: TransactionErrorMetrics, + + pub min_prioritization_fees: u64, + pub max_prioritization_fees: u64, +} + +// Metrics describing prioritization fee information for each transaction storage before processing transactions +#[derive(Debug, Default)] +struct LeaderPrioritizationFeesMetrics { + // minimum prioritization fees in the MinMaxHeap + min_prioritization_fees_per_cu: u64, + // maximum prioritization fees in the MinMaxHeap + max_prioritization_fees_per_cu: u64, +} + +impl LeaderPrioritizationFeesMetrics { + fn new(unprocessed_transaction_storage: Option<&UnprocessedTransactionStorage>) -> Self { + if let Some(unprocessed_transaction_storage) = unprocessed_transaction_storage { + Self { + min_prioritization_fees_per_cu: unprocessed_transaction_storage + .get_min_priority() + .unwrap_or_default(), + max_prioritization_fees_per_cu: unprocessed_transaction_storage + .get_max_priority() + .unwrap_or_default(), + } + } else { + Self::default() + } + } + + fn report(&self, id: u32, slot: Slot) { + datapoint_info!( + "banking_stage-leader_prioritization_fees_info", + ("id", id, i64), + ("slot", slot, i64), + ( + "min_prioritization_fees_per_cu", + self.min_prioritization_fees_per_cu, + i64 + ), + ( + "max_prioritization_fees_per_cu", + self.max_prioritization_fees_per_cu, + i64 + ) + ); + } } // Metrics describing packets ingested/processed in various parts of BankingStage during this @@ -138,6 +187,11 @@ struct 
LeaderSlotPacketCountMetrics { // total number of forwardable batches that were attempted for forwarding. A forwardable batch // is defined in `ForwardPacketBatchesByAccounts` in `forward_packet_batches_by_accounts.rs` forwardable_batches_count: u64, + + // min prioritization fees for scheduled transactions + min_prioritization_fees: u64, + // max prioritization fees for scheduled transactions + max_prioritization_fees: u64, } impl LeaderSlotPacketCountMetrics { @@ -255,6 +309,16 @@ impl LeaderSlotPacketCountMetrics { self.end_of_slot_unprocessed_buffer_len as i64, i64 ), + ( + "min_prioritization_fees", + self.min_prioritization_fees as i64, + i64 + ), + ( + "max_prioritization_fees", + self.max_prioritization_fees as i64, + i64 + ), ); } } @@ -277,12 +341,19 @@ pub(crate) struct LeaderSlotMetrics { timing_metrics: LeaderSlotTimingMetrics, + prioritization_fees_metric: LeaderPrioritizationFeesMetrics, + // Used by tests to check if the `self.report()` method was called is_reported: bool, } impl LeaderSlotMetrics { - pub(crate) fn new(id: u32, slot: Slot, bank_creation_time: &Instant) -> Self { + pub(crate) fn new( + id: u32, + slot: Slot, + bank_creation_time: &Instant, + unprocessed_transaction_storage: Option<&UnprocessedTransactionStorage>, + ) -> Self { Self { id, slot, @@ -290,6 +361,9 @@ impl LeaderSlotMetrics { transaction_error_metrics: TransactionErrorMetrics::new(), vote_packet_count_metrics: VotePacketCountMetrics::new(), timing_metrics: LeaderSlotTimingMetrics::new(bank_creation_time), + prioritization_fees_metric: LeaderPrioritizationFeesMetrics::new( + unprocessed_transaction_storage, + ), is_reported: false, } } @@ -301,6 +375,7 @@ impl LeaderSlotMetrics { self.transaction_error_metrics.report(self.id, self.slot); self.packet_count_metrics.report(self.id, self.slot); self.vote_packet_count_metrics.report(self.id, self.slot); + self.prioritization_fees_metric.report(self.id, self.slot); } /// Returns `Some(self.slot)` if the metrics have been 
reported, otherwise returns None
@@ -372,6 +447,7 @@ impl LeaderSlotMetricsTracker {
     pub(crate) fn check_leader_slot_boundary(
         &mut self,
         bank_start: Option<&BankStart>,
+        unprocessed_transaction_storage: Option<&UnprocessedTransactionStorage>,
     ) -> MetricsTrackerAction {
         match (self.leader_slot_metrics.as_mut(), bank_start) {
             (None, None) => MetricsTrackerAction::Noop,
@@ -387,6 +463,7 @@ impl LeaderSlotMetricsTracker {
                     self.id,
                     bank_start.working_bank.slot(),
                     &bank_start.bank_creation_time,
+                    unprocessed_transaction_storage,
                 )))
             }
 
@@ -398,6 +475,7 @@ impl LeaderSlotMetricsTracker {
                         self.id,
                         bank_start.working_bank.slot(),
                         &bank_start.bank_creation_time,
+                        unprocessed_transaction_storage,
                     )))
                 } else {
                     MetricsTrackerAction::Noop
@@ -449,6 +527,8 @@ impl LeaderSlotMetricsTracker {
             cost_model_us,
             ref execute_and_commit_timings,
             error_counters,
+            min_prioritization_fees,
+            max_prioritization_fees,
             ..
         } = process_transactions_summary;
 
@@ -525,6 +605,23 @@ impl LeaderSlotMetricsTracker {
                 *cost_model_us
             );
 
+            leader_slot_metrics
+                .packet_count_metrics
+                .min_prioritization_fees = std::cmp::min(
+                leader_slot_metrics
+                    .packet_count_metrics
+                    .min_prioritization_fees,
+                *min_prioritization_fees,
+            );
+            leader_slot_metrics
+                .packet_count_metrics
+                .max_prioritization_fees = std::cmp::max(
+                leader_slot_metrics
+                    .packet_count_metrics
+                    .max_prioritization_fees,
+                *max_prioritization_fees,
+            );
+
             leader_slot_metrics
                 .timing_metrics
                 .execute_and_commit_timings
@@ -896,7 +993,7 @@ mod tests {
             ..
} = setup_test_slot_boundary_banks(); // Test that with no bank being tracked, and no new bank being tracked, nothing is reported - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::Noop), mem::discriminant(&action) @@ -916,8 +1013,8 @@ mod tests { // Test case where the thread has not detected a leader bank, and now sees a leader bank. // Metrics should not be reported because leader slot has not ended assert!(leader_slot_metrics_tracker.leader_slot_metrics.is_none()); - let action = - leader_slot_metrics_tracker.check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + let action = leader_slot_metrics_tracker + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert_eq!( mem::discriminant(&MetricsTrackerAction::NewTracker(None)), mem::discriminant(&action) @@ -941,12 +1038,12 @@ mod tests { { // Setup first_bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if slot has ended - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker), mem::discriminant(&action) @@ -959,7 +1056,7 @@ mod tests { } { // Assert no-op if still no new bank - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::Noop), mem::discriminant(&action) @@ -981,13 +1078,13 @@ mod tests { { // Setup with first_bank let action = leader_slot_metrics_tracker - 
.check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert nop-op if same bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert_eq!( mem::discriminant(&MetricsTrackerAction::Noop), mem::discriminant(&action) @@ -996,7 +1093,7 @@ mod tests { } { // Assert reporting if slot has ended - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker), mem::discriminant(&action) @@ -1025,13 +1122,13 @@ mod tests { { // Setup with first_bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if new bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&next_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&next_poh_recorder_bank), None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndNewTracker(None)), mem::discriminant(&action) @@ -1044,7 +1141,7 @@ mod tests { } { // Assert reporting if slot has ended - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker), mem::discriminant(&action) @@ -1072,13 +1169,13 @@ mod tests { { // Setup with next_bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&next_poh_recorder_bank)); + 
.check_leader_slot_boundary(Some(&next_poh_recorder_bank), None); assert!(leader_slot_metrics_tracker.apply_action(action).is_none()); } { // Assert reporting if new bank let action = leader_slot_metrics_tracker - .check_leader_slot_boundary(Some(&first_poh_recorder_bank)); + .check_leader_slot_boundary(Some(&first_poh_recorder_bank), None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndNewTracker(None)), mem::discriminant(&action) @@ -1091,7 +1188,7 @@ mod tests { } { // Assert reporting if slot has ended - let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None); + let action = leader_slot_metrics_tracker.check_leader_slot_boundary(None, None); assert_eq!( mem::discriminant(&MetricsTrackerAction::ReportAndResetTracker), mem::discriminant(&action) diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index e17f34d3223411..e0b53a97ab020e 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -191,7 +191,7 @@ impl PrioGraphScheduler { saturating_add_assign!(num_scheduled, 1); let sanitized_transaction_ttl = transaction_state.transition_to_pending(); - let cost = transaction_state.transaction_cost().sum(); + let cost = transaction_state.cost(); let SanitizedTransactionTTL { transaction, @@ -490,12 +490,9 @@ mod tests { crate::banking_stage::consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, crossbeam_channel::{unbounded, Receiver}, itertools::Itertools, - solana_cost_model::cost_model::CostModel, - solana_runtime::transaction_priority_details::TransactionPriorityDetails, solana_sdk::{ - compute_budget::ComputeBudgetInstruction, feature_set::FeatureSet, hash::Hash, - message::Message, pubkey::Pubkey, signature::Keypair, signer::Signer, - system_instruction, transaction::Transaction, + compute_budget::ComputeBudgetInstruction, hash::Hash, 
message::Message, pubkey::Pubkey, + signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, }, std::borrow::Borrow, }; @@ -562,25 +559,26 @@ mod tests { >, ) -> TransactionStateContainer { let mut container = TransactionStateContainer::with_capacity(10 * 1024); - for (index, (from_keypair, to_pubkeys, lamports, priority)) in + for (index, (from_keypair, to_pubkeys, lamports, compute_unit_price)) in tx_infos.into_iter().enumerate() { let id = TransactionId::new(index as u64); - let transaction = - prioritized_tranfers(from_keypair.borrow(), to_pubkeys, lamports, priority); - let transaction_cost = CostModel::calculate_cost(&transaction, &FeatureSet::default()); + let transaction = prioritized_tranfers( + from_keypair.borrow(), + to_pubkeys, + lamports, + compute_unit_price, + ); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age_slot: Slot::MAX, }; + const TEST_TRANSACTION_COST: u64 = 5000; container.insert_new_transaction( id, transaction_ttl, - TransactionPriorityDetails { - priority, - compute_unit_limit: 1, - }, - transaction_cost, + compute_unit_price, + TEST_TRANSACTION_COST, ); } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index c336f56f8949c3..a5c0fa134f5369 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -17,14 +17,17 @@ use { TOTAL_BUFFERED_PACKETS, }, crossbeam_channel::RecvTimeoutError, - solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, + itertools::MinMaxResult, solana_cost_model::cost_model::CostModel, solana_measure::measure_us, + solana_program_runtime::compute_budget_processor::process_compute_budget_instructions, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{ - clock::MAX_PROCESSING_AGE, saturating_add_assign, timing::AtomicInterval, - 
transaction::SanitizedTransaction, + clock::MAX_PROCESSING_AGE, + feature_set::include_loaded_accounts_data_size_in_fee_calculation, fee::FeeBudgetLimits, + saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, }, + solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ sync::{Arc, RwLock}, time::Duration, @@ -95,10 +98,11 @@ impl SchedulerController { if !self.receive_and_buffer_packets(&decision) { break; } - // Report metrics only if there is data. // Reset intervals when appropriate, regardless of report. let should_report = self.count_metrics.has_data(); + self.count_metrics + .update_priority_stats(self.container.get_min_max_priority()); self.count_metrics.maybe_report_and_reset(should_report); self.timing_metrics.maybe_report_and_reset(should_report); self.worker_metrics @@ -309,21 +313,24 @@ impl SchedulerController { let mut error_counts = TransactionErrorMetrics::default(); for chunk in packets.chunks(CHUNK_SIZE) { let mut post_sanitization_count: usize = 0; - let (transactions, priority_details): (Vec<_>, Vec<_>) = chunk + let (transactions, fee_budget_limits_vec): (Vec<_>, Vec<_>) = chunk .iter() .filter_map(|packet| { - packet - .build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) - .map(|tx| (tx, packet.priority_details())) + packet.build_sanitized_transaction(feature_set, vote_only, bank.as_ref()) }) .inspect(|_| saturating_add_assign!(post_sanitization_count, 1)) - .filter(|(tx, _)| { + .filter(|tx| { SanitizedTransaction::validate_account_locks( tx.message(), transaction_account_lock_limit, ) .is_ok() }) + .filter_map(|tx| { + process_compute_budget_instructions(tx.message().program_instructions_iter()) + .map(|compute_budget| (tx, compute_budget.into())) + .ok() + }) .unzip(); let check_results = bank.check_transactions( @@ -335,16 +342,17 @@ impl SchedulerController { let post_lock_validation_count = transactions.len(); let mut post_transaction_check_count: usize = 0; - for ((transaction, 
priority_details), _) in transactions + for ((transaction, fee_budget_limits), _) in transactions .into_iter() - .zip(priority_details) + .zip(fee_budget_limits_vec) .zip(check_results) .filter(|(_, check_result)| check_result.0.is_ok()) { saturating_add_assign!(post_transaction_check_count, 1); let transaction_id = self.transaction_id_generator.next(); - let transaction_cost = CostModel::calculate_cost(&transaction, &bank.feature_set); + let (priority, cost) = + Self::calculate_priority_and_cost(&transaction, &fee_budget_limits, &bank); let transaction_ttl = SanitizedTransactionTTL { transaction, max_age_slot: last_slot_in_epoch, @@ -353,8 +361,8 @@ impl SchedulerController { if self.container.insert_new_transaction( transaction_id, transaction_ttl, - priority_details, - transaction_cost, + priority, + cost, ) { saturating_add_assign!(self.count_metrics.num_dropped_on_capacity, 1); } @@ -382,6 +390,51 @@ impl SchedulerController { ); } } + + /// Calculate priority and cost for a transaction: + /// + /// Cost is calculated through the `CostModel`, + /// and priority is calculated through a formula here that attempts to sell + /// blockspace to the highest bidder. + /// + /// The priority is calculated as: + /// P = R / (1 + C) + /// where P is the priority, R is the reward, + /// and C is the cost towards block-limits. + /// + /// Current minimum costs are on the order of several hundred, + /// so the denominator is effectively C, and the +1 is simply + /// to avoid any division by zero due to a bug - these costs + /// are calculated by the cost-model and are not direct + /// from user input. They should never be zero. + /// Any difference in the prioritization is negligible for + /// the current transaction costs. 
+ fn calculate_priority_and_cost( + transaction: &SanitizedTransaction, + fee_budget_limits: &FeeBudgetLimits, + bank: &Bank, + ) -> (u64, u64) { + let cost = CostModel::calculate_cost(transaction, &bank.feature_set).sum(); + let fee = bank.fee_structure.calculate_fee( + transaction.message(), + 5_000, // this just needs to be non-zero + fee_budget_limits, + bank.feature_set + .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + ); + + // We need a multiplier here to avoid rounding down too aggressively. + // For many transactions, the cost will be greater than the fees in terms of raw lamports. + // For the purposes of calculating prioritization, we multiply the fees by a large number so that + // the cost is a small fraction. + // An offset of 1 is used in the denominator to explicitly avoid division by zero. + const MULTIPLIER: u64 = 1_000_000; + ( + fee.saturating_mul(MULTIPLIER) + .saturating_div(cost.saturating_add(1)), + cost, + ) + } } #[derive(Default)] @@ -419,6 +472,10 @@ struct SchedulerCountMetrics { num_dropped_on_age_and_status: usize, /// Number of transactions that were dropped due to exceeded capacity. 
num_dropped_on_capacity: usize, + /// Min prioritization fees in the transaction container + min_prioritization_fees: u64, + /// Max prioritization fees in the transaction container + max_prioritization_fees: u64, } impl SchedulerCountMetrics { @@ -468,7 +525,9 @@ impl SchedulerCountMetrics { self.num_dropped_on_age_and_status, i64 ), - ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64) + ("num_dropped_on_capacity", self.num_dropped_on_capacity, i64), + ("min_priority", self.get_min_priority(), i64), + ("max_priority", self.get_max_priority(), i64) ); } @@ -504,6 +563,38 @@ impl SchedulerCountMetrics { self.num_dropped_on_clear = 0; self.num_dropped_on_age_and_status = 0; self.num_dropped_on_capacity = 0; + self.min_prioritization_fees = u64::MAX; + self.max_prioritization_fees = 0; + } + + pub fn update_priority_stats(&mut self, min_max_fees: MinMaxResult) { + // update min/max priority + match min_max_fees { + itertools::MinMaxResult::NoElements => { + // do nothing + } + itertools::MinMaxResult::OneElement(e) => { + self.min_prioritization_fees = e; + self.max_prioritization_fees = e; + } + itertools::MinMaxResult::MinMax(min, max) => { + self.min_prioritization_fees = min; + self.max_prioritization_fees = max; + } + } + } + + pub fn get_min_priority(&self) -> u64 { + // to avoid getting u64::max recorded by metrics / in case of edge cases + if self.min_prioritization_fees != u64::MAX { + self.min_prioritization_fees + } else { + 0 + } + } + + pub fn get_max_priority(&self) -> u64 { + self.max_prioritization_fees } } @@ -680,7 +771,7 @@ mod tests { from_keypair: &Keypair, to_pubkey: &Pubkey, lamports: u64, - priority: u64, + compute_unit_price: u64, recent_blockhash: Hash, ) -> Transaction { // Fund the sending key, so that the transaction does not get filtered by the fee-payer check. 
@@ -695,7 +786,7 @@ mod tests { } let transfer = system_instruction::transfer(&from_keypair.pubkey(), to_pubkey, lamports); - let prioritization = ComputeBudgetInstruction::set_compute_unit_price(priority); + let prioritization = ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price); let message = Message::new(&[transfer, prioritization], Some(&from_keypair.pubkey())); Transaction::new(&vec![from_keypair], message, recent_blockhash) } @@ -951,7 +1042,7 @@ mod tests { &Keypair::new(), &Pubkey::new_unique(), 1, - i, + i * 10, bank.last_blockhash(), ) }) diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state.rs b/core/src/banking_stage/transaction_scheduler/transaction_state.rs index 650ffa1cd3ce7e..727140545ab656 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state.rs @@ -1,8 +1,4 @@ -use { - solana_cost_model::transaction_cost::TransactionCost, - solana_runtime::transaction_priority_details::TransactionPriorityDetails, - solana_sdk::{slot_history::Slot, transaction::SanitizedTransaction}, -}; +use solana_sdk::{clock::Slot, transaction::SanitizedTransaction}; /// Simple wrapper type to tie a sanitized transaction to max age slot. pub(crate) struct SanitizedTransactionTTL { @@ -34,77 +30,38 @@ pub(crate) enum TransactionState { /// The transaction is available for scheduling. Unprocessed { transaction_ttl: SanitizedTransactionTTL, - transaction_priority_details: TransactionPriorityDetails, - transaction_cost: TransactionCost, - forwarded: bool, + priority: u64, + cost: u64, }, /// The transaction is currently scheduled or being processed. - Pending { - transaction_priority_details: TransactionPriorityDetails, - transaction_cost: TransactionCost, - forwarded: bool, - }, + Pending { priority: u64, cost: u64 }, } impl TransactionState { /// Creates a new `TransactionState` in the `Unprocessed` state. 
- pub(crate) fn new( - transaction_ttl: SanitizedTransactionTTL, - transaction_priority_details: TransactionPriorityDetails, - transaction_cost: TransactionCost, - ) -> Self { + pub(crate) fn new(transaction_ttl: SanitizedTransactionTTL, priority: u64, cost: u64) -> Self { Self::Unprocessed { transaction_ttl, - transaction_priority_details, - transaction_cost, - forwarded: false, - } - } - - /// Returns a reference to the priority details of the transaction. - pub(crate) fn transaction_priority_details(&self) -> &TransactionPriorityDetails { - match self { - Self::Unprocessed { - transaction_priority_details, - .. - } => transaction_priority_details, - Self::Pending { - transaction_priority_details, - .. - } => transaction_priority_details, + priority, + cost, } } - /// Returns a reference to the transaction cost of the transaction. - pub(crate) fn transaction_cost(&self) -> &TransactionCost { - match self { - Self::Unprocessed { - transaction_cost, .. - } => transaction_cost, - Self::Pending { - transaction_cost, .. - } => transaction_cost, - } - } - - /// Returns the priority of the transaction. + /// Return the priority of the transaction. + /// This is *not* the same as the `compute_unit_price` of the transaction. + /// The priority is used to order transactions for processing. pub(crate) fn priority(&self) -> u64 { - self.transaction_priority_details().priority - } - - /// Returns whether or not the transaction has already been forwarded. - pub(crate) fn forwarded(&self) -> bool { match self { - Self::Unprocessed { forwarded, .. } => *forwarded, - Self::Pending { forwarded, .. } => *forwarded, + Self::Unprocessed { priority, .. } => *priority, + Self::Pending { priority, .. } => *priority, } } - /// Sets the transaction as forwarded. - pub(crate) fn set_forwarded(&mut self) { + /// Return the cost of the transaction. + pub(crate) fn cost(&self) -> u64 { match self { - Self::Unprocessed { forwarded, .. } => *forwarded = true, - Self::Pending { forwarded, .. 
} => *forwarded = true, + Self::Unprocessed { cost, .. } => *cost, + Self::Pending { cost, .. } => *cost, } } @@ -119,15 +76,10 @@ impl TransactionState { match self.take() { TransactionState::Unprocessed { transaction_ttl, - transaction_priority_details, - transaction_cost, - forwarded, + priority, + cost, } => { - *self = TransactionState::Pending { - transaction_priority_details, - transaction_cost, - forwarded, - }; + *self = TransactionState::Pending { priority, cost }; transaction_ttl } TransactionState::Pending { .. } => { @@ -145,16 +97,11 @@ impl TransactionState { pub(crate) fn transition_to_unprocessed(&mut self, transaction_ttl: SanitizedTransactionTTL) { match self.take() { TransactionState::Unprocessed { .. } => panic!("already unprocessed"), - TransactionState::Pending { - transaction_priority_details, - transaction_cost, - forwarded, - } => { + TransactionState::Pending { priority, cost } => { *self = Self::Unprocessed { transaction_ttl, - transaction_priority_details, - transaction_cost, - forwarded, + priority, + cost, } } } @@ -179,14 +126,8 @@ impl TransactionState { core::mem::replace( self, Self::Pending { - transaction_priority_details: TransactionPriorityDetails { - priority: 0, - compute_unit_limit: 0, - }, - transaction_cost: TransactionCost::SimpleVote { - writable_accounts: vec![], - }, - forwarded: false, + priority: 0, + cost: 0, }, ) } @@ -196,14 +137,13 @@ impl TransactionState { mod tests { use { super::*, - solana_cost_model::transaction_cost::UsageCostDetails, solana_sdk::{ compute_budget::ComputeBudgetInstruction, hash::Hash, message::Message, signature::Keypair, signer::Signer, system_instruction, transaction::Transaction, }, }; - fn create_transaction_state(priority: u64) -> TransactionState { + fn create_transaction_state(compute_unit_price: u64) -> TransactionState { let from_keypair = Keypair::new(); let ixs = vec![ system_instruction::transfer( @@ -211,28 +151,17 @@ mod tests { &solana_sdk::pubkey::new_rand(), 1, ), - 
ComputeBudgetInstruction::set_compute_unit_price(priority), + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), ]; let message = Message::new(&ixs, Some(&from_keypair.pubkey())); let tx = Transaction::new(&[&from_keypair], message, Hash::default()); - let transaction_cost = TransactionCost::Transaction(UsageCostDetails { - signature_cost: 5000, - ..UsageCostDetails::default() - }); let transaction_ttl = SanitizedTransactionTTL { transaction: SanitizedTransaction::from_transaction_for_tests(tx), max_age_slot: Slot::MAX, }; - - TransactionState::new( - transaction_ttl, - TransactionPriorityDetails { - priority, - compute_unit_limit: 0, - }, - transaction_cost, - ) + const TEST_TRANSACTION_COST: u64 = 5000; + TransactionState::new(transaction_ttl, compute_unit_price, TEST_TRANSACTION_COST) } #[test] @@ -294,12 +223,12 @@ mod tests { } #[test] - fn test_transaction_priority_details() { + fn test_priority() { let priority = 15; let mut transaction_state = create_transaction_state(priority); assert_eq!(transaction_state.priority(), priority); - // ensure priority is not lost through state transitions + // ensure compute unit price is not lost through state transitions let transaction_ttl = transaction_state.transition_to_pending(); assert_eq!(transaction_state.priority(), priority); transaction_state.transition_to_unprocessed(transaction_ttl); diff --git a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs index d7d79cb21b7c32..a627375a03e6ba 100644 --- a/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs +++ b/core/src/banking_stage/transaction_scheduler/transaction_state_container.rs @@ -4,9 +4,8 @@ use { transaction_state::{SanitizedTransactionTTL, TransactionState}, }, crate::banking_stage::scheduler_messages::TransactionId, + itertools::MinMaxResult, min_max_heap::MinMaxHeap, - 
solana_cost_model::transaction_cost::TransactionCost, - solana_runtime::transaction_priority_details::TransactionPriorityDetails, std::collections::HashMap, }; @@ -98,18 +97,13 @@ impl TransactionStateContainer { &mut self, transaction_id: TransactionId, transaction_ttl: SanitizedTransactionTTL, - transaction_priority_details: TransactionPriorityDetails, - transaction_cost: TransactionCost, + priority: u64, + cost: u64, ) -> bool { - let priority_id = - TransactionPriorityId::new(transaction_priority_details.priority, transaction_id); + let priority_id = TransactionPriorityId::new(priority, transaction_id); self.id_to_transaction_state.insert( transaction_id, - TransactionState::new( - transaction_ttl, - transaction_priority_details, - transaction_cost, - ), + TransactionState::new(transaction_ttl, priority, cost), ); self.push_id_into_queue(priority_id) } @@ -149,16 +143,24 @@ impl TransactionStateContainer { .remove(id) .expect("transaction must exist"); } + + pub(crate) fn get_min_max_priority(&self) -> MinMaxResult { + match self.priority_queue.peek_min() { + Some(min) => match self.priority_queue.peek_max() { + Some(max) => MinMaxResult::MinMax(min.priority, max.priority), + None => MinMaxResult::OneElement(min.priority), + }, + None => MinMaxResult::NoElements, + } + } } #[cfg(test)] mod tests { use { super::*, - solana_cost_model::cost_model::CostModel, solana_sdk::{ compute_budget::ComputeBudgetInstruction, - feature_set::FeatureSet, hash::Hash, message::Message, signature::Keypair, @@ -169,13 +171,8 @@ mod tests { }, }; - fn test_transaction( - priority: u64, - ) -> ( - SanitizedTransactionTTL, - TransactionPriorityDetails, - TransactionCost, - ) { + /// Returns (transaction_ttl, priority, cost) + fn test_transaction(priority: u64) -> (SanitizedTransactionTTL, u64, u64) { let from_keypair = Keypair::new(); let ixs = vec![ system_instruction::transfer( @@ -191,31 +188,23 @@ mod tests { message, Hash::default(), )); - let transaction_cost = 
CostModel::calculate_cost(&tx, &FeatureSet::default()); let transaction_ttl = SanitizedTransactionTTL { transaction: tx, max_age_slot: Slot::MAX, }; - ( - transaction_ttl, - TransactionPriorityDetails { - priority, - compute_unit_limit: 0, - }, - transaction_cost, - ) + const TEST_TRANSACTION_COST: u64 = 5000; + (transaction_ttl, priority, TEST_TRANSACTION_COST) } fn push_to_container(container: &mut TransactionStateContainer, num: usize) { for id in 0..num as u64 { let priority = id; - let (transaction_ttl, transaction_priority_details, transaction_cost) = - test_transaction(priority); + let (transaction_ttl, priority, cost) = test_transaction(priority); container.insert_new_transaction( TransactionId::new(id), transaction_ttl, - transaction_priority_details, - transaction_cost, + priority, + cost, ); } } diff --git a/core/src/banking_stage/unprocessed_packet_batches.rs b/core/src/banking_stage/unprocessed_packet_batches.rs index ff323ef25f18ee..b87cfef291b991 100644 --- a/core/src/banking_stage/unprocessed_packet_batches.rs +++ b/core/src/banking_stage/unprocessed_packet_batches.rs @@ -49,8 +49,8 @@ impl PartialOrd for DeserializedPacket { impl Ord for DeserializedPacket { fn cmp(&self, other: &Self) -> Ordering { self.immutable_section() - .priority() - .cmp(&other.immutable_section().priority()) + .compute_unit_price() + .cmp(&other.immutable_section().compute_unit_price()) } } @@ -193,6 +193,18 @@ impl UnprocessedPacketBatches { self.packet_priority_queue.is_empty() } + pub fn get_min_compute_unit_price(&self) -> Option { + self.packet_priority_queue + .peek_min() + .map(|x| x.compute_unit_price()) + } + + pub fn get_max_compute_unit_price(&self) -> Option { + self.packet_priority_queue + .peek_max() + .map(|x| x.compute_unit_price()) + } + fn push_internal(&mut self, deserialized_packet: DeserializedPacket) { // Push into the priority queue self.packet_priority_queue @@ -317,12 +329,15 @@ mod tests { DeserializedPacket::new(packet).unwrap() } - fn 
packet_with_priority_details(priority: u64, compute_unit_limit: u64) -> DeserializedPacket { + fn packet_with_compute_budget_details( + compute_unit_price: u64, + compute_unit_limit: u64, + ) -> DeserializedPacket { let from_account = solana_sdk::pubkey::new_rand(); let tx = Transaction::new_unsigned(Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit(compute_unit_limit as u32), - ComputeBudgetInstruction::set_compute_unit_price(priority), + ComputeBudgetInstruction::set_compute_unit_price(compute_unit_price), system_instruction::transfer(&from_account, &solana_sdk::pubkey::new_rand(), 1), ], Some(&from_account), @@ -348,10 +363,10 @@ mod tests { #[test] fn test_unprocessed_packet_batches_insert_minimum_packet_over_capacity() { let heavier_packet_weight = 2; - let heavier_packet = packet_with_priority_details(heavier_packet_weight, 200_000); + let heavier_packet = packet_with_compute_budget_details(heavier_packet_weight, 200_000); let lesser_packet_weight = heavier_packet_weight - 1; - let lesser_packet = packet_with_priority_details(lesser_packet_weight, 200_000); + let lesser_packet = packet_with_compute_budget_details(lesser_packet_weight, 200_000); // Test that the heavier packet is actually heavier let mut unprocessed_packet_batches = UnprocessedPacketBatches::with_capacity(2); diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index 257bf1b141975b..adfb11f0b28fc2 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -17,13 +17,13 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, - solana_accounts_db::transaction_error_metrics::TransactionErrorMetrics, solana_measure::{measure, measure_us}, solana_runtime::bank::Bank, solana_sdk::{ clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, saturating_add_assign, 
transaction::SanitizedTransaction, }, + solana_svm::transaction_error_metrics::TransactionErrorMetrics, std::{ collections::HashMap, sync::{atomic::Ordering, Arc}, @@ -282,6 +282,24 @@ impl UnprocessedTransactionStorage { } } + pub fn get_min_priority(&self) -> Option { + match self { + Self::VoteStorage(_) => None, + Self::LocalTransactionStorage(transaction_storage) => { + transaction_storage.get_min_compute_unit_price() + } + } + } + + pub fn get_max_priority(&self) -> Option { + match self { + Self::VoteStorage(_) => None, + Self::LocalTransactionStorage(transaction_storage) => { + transaction_storage.get_max_compute_unit_price() + } + } + } + /// Returns the maximum number of packets a receive should accept pub fn max_receive_size(&self) -> usize { match self { @@ -529,6 +547,14 @@ impl ThreadLocalUnprocessedPackets { self.unprocessed_packet_batches.len() } + pub fn get_min_compute_unit_price(&self) -> Option { + self.unprocessed_packet_batches.get_min_compute_unit_price() + } + + pub fn get_max_compute_unit_price(&self) -> Option { + self.unprocessed_packet_batches.get_max_compute_unit_price() + } + fn max_receive_size(&self) -> usize { self.unprocessed_packet_batches.capacity() - self.unprocessed_packet_batches.len() } @@ -749,7 +775,6 @@ impl ThreadLocalUnprocessedPackets { }) .unzip(); - inc_new_counter_info!("banking_stage-packet_conversion", 1); let filtered_count = packets_to_process.len().saturating_sub(transactions.len()); saturating_add_assign!(*total_dropped_packets, filtered_count); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 54312baf30d9ec..4f129b18282218 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -210,6 +210,17 @@ impl TowerVersions { } } +#[derive(PartialEq, Eq, Debug, Default, Clone, Copy, AbiExample)] +pub(crate) enum BlockhashStatus { + /// No vote since restart + #[default] + Uninitialized, + /// Non voting validator + NonVoting, + /// Successfully generated vote tx with blockhash + 
Blockhash(Hash), +} + #[frozen_abi(digest = "iZi6s9BvytU3HbRsibrAD71jwMLvrqHdCjVk6qKcVvd")] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, AbiExample)] pub struct Tower { @@ -223,8 +234,8 @@ pub struct Tower { // blockhash of the voted block itself, depending if the vote slot was refreshed. // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. - // For non voting validators this is None - last_vote_tx_blockhash: Option, + // For non voting validators this is NonVoting + last_vote_tx_blockhash: BlockhashStatus, last_timestamp: BlockTimestamp, #[serde(skip)] // Restored last voted slot which cannot be found in SlotHistory at replayed root @@ -247,7 +258,7 @@ impl Default for Tower { vote_state: VoteState::default(), last_vote: VoteTransaction::from(VoteStateUpdate::default()), last_timestamp: BlockTimestamp::default(), - last_vote_tx_blockhash: None, + last_vote_tx_blockhash: BlockhashStatus::default(), stray_restored_slot: Option::default(), last_switch_threshold_check: Option::default(), }; @@ -486,7 +497,7 @@ impl Tower { self.vote_state.tower() } - pub fn last_vote_tx_blockhash(&self) -> Option { + pub(crate) fn last_vote_tx_blockhash(&self) -> BlockhashStatus { self.last_vote_tx_blockhash } @@ -530,7 +541,11 @@ impl Tower { } pub fn refresh_last_vote_tx_blockhash(&mut self, new_vote_tx_blockhash: Hash) { - self.last_vote_tx_blockhash = Some(new_vote_tx_blockhash); + self.last_vote_tx_blockhash = BlockhashStatus::Blockhash(new_vote_tx_blockhash); + } + + pub(crate) fn mark_last_vote_tx_blockhash_non_voting(&mut self) { + self.last_vote_tx_blockhash = BlockhashStatus::NonVoting; } pub fn last_voted_slot_in_bank(bank: &Bank, vote_account_pubkey: &Pubkey) -> Option { diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs index befce935034eff..22c396e0975e59 100644 --- a/core/src/consensus/tower1_14_11.rs +++ 
b/core/src/consensus/tower1_14_11.rs @@ -1,6 +1,6 @@ use { - crate::consensus::SwitchForkDecision, - solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + crate::consensus::{BlockhashStatus, SwitchForkDecision}, + solana_sdk::{clock::Slot, pubkey::Pubkey}, solana_vote_program::vote_state::{ vote_state_1_14_11::VoteState1_14_11, BlockTimestamp, VoteTransaction, }, @@ -19,7 +19,7 @@ pub struct Tower1_14_11 { // blockhash of the voted block itself, depending if the vote slot was refreshed. // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. - pub(crate) last_vote_tx_blockhash: Option, + pub(crate) last_vote_tx_blockhash: BlockhashStatus, pub(crate) last_timestamp: BlockTimestamp, #[serde(skip)] // Restored last voted slot which cannot be found in SlotHistory at replayed root diff --git a/core/src/consensus/tower1_7_14.rs b/core/src/consensus/tower1_7_14.rs index 62e5870b4efbb6..725b781924d8c9 100644 --- a/core/src/consensus/tower1_7_14.rs +++ b/core/src/consensus/tower1_7_14.rs @@ -1,8 +1,7 @@ use { - crate::consensus::{Result, SwitchForkDecision, TowerError}, + crate::consensus::{BlockhashStatus, Result, SwitchForkDecision, TowerError}, solana_sdk::{ clock::Slot, - hash::Hash, pubkey::Pubkey, signature::{Signature, Signer}, }, @@ -22,7 +21,7 @@ pub struct Tower1_7_14 { // blockhash of the voted block itself, depending if the vote slot was refreshed. // For instance, a vote for slot 5, may be refreshed/resubmitted for inclusion in // block 10, in which case `last_vote_tx_blockhash` equals the blockhash of 10, not 5. 
- pub(crate) last_vote_tx_blockhash: Option, + pub(crate) last_vote_tx_blockhash: BlockhashStatus, pub(crate) last_timestamp: BlockTimestamp, #[serde(skip)] // Restored last voted slot which cannot be found in SlotHistory at replayed root diff --git a/core/src/consensus/tower_storage.rs b/core/src/consensus/tower_storage.rs index 61f3c07245105c..1e81f28f47ce46 100644 --- a/core/src/consensus/tower_storage.rs +++ b/core/src/consensus/tower_storage.rs @@ -372,7 +372,7 @@ pub mod test { super::*, crate::consensus::{ tower1_7_14::{SavedTower1_7_14, Tower1_7_14}, - Tower, + BlockhashStatus, Tower, }, solana_sdk::{hash::Hash, signature::Keypair}, solana_vote_program::vote_state::{ @@ -403,7 +403,7 @@ pub mod test { vote_state: VoteState1_14_11::from(vote_state), last_vote: vote.clone(), last_timestamp: BlockTimestamp::default(), - last_vote_tx_blockhash: None, + last_vote_tx_blockhash: BlockhashStatus::Uninitialized, stray_restored_slot: Some(2), last_switch_threshold_check: Option::default(), }; diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 0ba4cdc505e154..27c30b9e52eb7b 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -15,8 +15,8 @@ use { latest_validator_votes_for_frozen_banks::LatestValidatorVotesForFrozenBanks, progress_map::{ForkProgress, ProgressMap, PropagatedStats, ReplaySlotStats}, tower_storage::{SavedTower, SavedTowerVersions, TowerStorage}, - ComputedBankState, Stake, SwitchForkDecision, ThresholdDecision, Tower, VotedStakes, - SWITCH_FORK_THRESHOLD, + BlockhashStatus, ComputedBankState, Stake, SwitchForkDecision, ThresholdDecision, + Tower, VotedStakes, SWITCH_FORK_THRESHOLD, }, cost_update_service::CostUpdate, repair::{ @@ -137,6 +137,20 @@ enum ConfirmationType { DuplicateConfirmed, } +enum GenerateVoteTxResult { + // non voting validator, not eligible for refresh + NonVoting, + // failed generation, eligible for refresh + Failed, + Tx(Transaction), +} + +impl GenerateVoteTxResult { + fn is_non_voting(&self) 
-> bool { + matches!(self, Self::NonVoting) + } +} + #[derive(PartialEq, Eq, Debug)] struct ConfirmedSlot { slot: Slot, @@ -2321,18 +2335,18 @@ impl ReplayStage { vote_signatures: &mut Vec, has_new_vote_been_rooted: bool, wait_to_vote_slot: Option, - ) -> Option { + ) -> GenerateVoteTxResult { if !bank.is_startup_verification_complete() { info!("startup verification incomplete, so unable to vote"); - return None; + return GenerateVoteTxResult::Failed; } if authorized_voter_keypairs.is_empty() { - return None; + return GenerateVoteTxResult::NonVoting; } if let Some(slot) = wait_to_vote_slot { if bank.slot() < slot { - return None; + return GenerateVoteTxResult::Failed; } } let vote_account = match bank.get_vote_account(vote_account_pubkey) { @@ -2341,7 +2355,7 @@ impl ReplayStage { "Vote account {} does not exist. Unable to vote", vote_account_pubkey, ); - return None; + return GenerateVoteTxResult::Failed; } Some(vote_account) => vote_account, }; @@ -2352,7 +2366,7 @@ impl ReplayStage { "Vote account {} is unreadable. Unable to vote", vote_account_pubkey, ); - return None; + return GenerateVoteTxResult::Failed; } Ok(vote_state) => vote_state, }; @@ -2363,7 +2377,7 @@ impl ReplayStage { vote_state.node_pubkey, node_keypair.pubkey() ); - return None; + return GenerateVoteTxResult::Failed; } let Some(authorized_voter_pubkey) = vote_state.get_authorized_voter(bank.epoch()) else { @@ -2372,7 +2386,7 @@ impl ReplayStage { vote_account_pubkey, bank.epoch() ); - return None; + return GenerateVoteTxResult::Failed; }; let authorized_voter_keypair = match authorized_voter_keypairs @@ -2382,7 +2396,7 @@ impl ReplayStage { None => { warn!("The authorized keypair {} for vote account {} is not available. 
Unable to vote", authorized_voter_pubkey, vote_account_pubkey); - return None; + return GenerateVoteTxResult::NonVoting; } Some(authorized_voter_keypair) => authorized_voter_keypair, }; @@ -2418,7 +2432,7 @@ impl ReplayStage { vote_signatures.clear(); } - Some(vote_tx) + GenerateVoteTxResult::Tx(vote_tx) } #[allow(clippy::too_many_arguments)] @@ -2457,13 +2471,23 @@ impl ReplayStage { // If we are a non voting validator or have an incorrect setup preventing us from // generating vote txs, no need to refresh - let Some(last_vote_tx_blockhash) = tower.last_vote_tx_blockhash() else { - return; + let last_vote_tx_blockhash = match tower.last_vote_tx_blockhash() { + // Since the checks in vote generation are deterministic, if we were non voting + // on the original vote, the refresh will also fail. No reason to refresh. + BlockhashStatus::NonVoting => return, + // In this case we have not voted since restart, it is unclear if we are non voting. + // Attempt to refresh. + BlockhashStatus::Uninitialized => None, + // Refresh if the blockhash is expired + BlockhashStatus::Blockhash(blockhash) => Some(blockhash), }; if my_latest_landed_vote >= last_voted_slot - || heaviest_bank_on_same_fork - .is_hash_valid_for_age(&last_vote_tx_blockhash, MAX_PROCESSING_AGE) + || { + last_vote_tx_blockhash.is_some() + && heaviest_bank_on_same_fork + .is_hash_valid_for_age(&last_vote_tx_blockhash.unwrap(), MAX_PROCESSING_AGE) + } || { // In order to avoid voting on multiple forks all past MAX_PROCESSING_AGE that don't // include the last voted blockhash @@ -2480,7 +2504,7 @@ impl ReplayStage { // Update timestamp for refreshed vote tower.refresh_last_vote_timestamp(heaviest_bank_on_same_fork.slot()); - let vote_tx = Self::generate_vote_tx( + let vote_tx_result = Self::generate_vote_tx( identity_keypair, heaviest_bank_on_same_fork, vote_account_pubkey, @@ -2492,7 +2516,7 @@ impl ReplayStage { wait_to_vote_slot, ); - if let Some(vote_tx) = vote_tx { + if let GenerateVoteTxResult::Tx(vote_tx) 
= vote_tx_result { let recent_blockhash = vote_tx.message.recent_blockhash; tower.refresh_last_vote_tx_blockhash(recent_blockhash); @@ -2511,6 +2535,8 @@ impl ReplayStage { }) .unwrap_or_else(|err| warn!("Error: {:?}", err)); last_vote_refresh_time.last_refresh_time = Instant::now(); + } else if vote_tx_result.is_non_voting() { + tower.mark_last_vote_tx_blockhash_non_voting(); } } @@ -2529,7 +2555,7 @@ impl ReplayStage { wait_to_vote_slot: Option, ) { let mut generate_time = Measure::start("generate_vote"); - let vote_tx = Self::generate_vote_tx( + let vote_tx_result = Self::generate_vote_tx( identity_keypair, bank, vote_account_pubkey, @@ -2542,7 +2568,7 @@ impl ReplayStage { ); generate_time.stop(); replay_timing.generate_vote_us += generate_time.as_us(); - if let Some(vote_tx) = vote_tx { + if let GenerateVoteTxResult::Tx(vote_tx) = vote_tx_result { tower.refresh_last_vote_tx_blockhash(vote_tx.message.recent_blockhash); let saved_tower = SavedTower::new(tower, identity_keypair).unwrap_or_else(|err| { @@ -2558,6 +2584,8 @@ impl ReplayStage { saved_tower: SavedTowerVersions::from(saved_tower), }) .unwrap_or_else(|err| warn!("Error: {:?}", err)); + } else if vote_tx_result.is_non_voting() { + tower.mark_last_vote_tx_blockhash_non_voting(); } } @@ -7480,8 +7508,8 @@ pub(crate) mod tests { let vote_tx = &votes[0]; assert_eq!(vote_tx.message.recent_blockhash, bank0.last_blockhash()); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank0.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank0.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 0); bank1.process_transaction(vote_tx).unwrap(); @@ -7517,8 +7545,8 @@ pub(crate) mod tests { assert!(votes.is_empty()); // Tower's latest vote tx blockhash hasn't changed either assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank0.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank0.last_blockhash()) ); 
assert_eq!(tower.last_voted_slot().unwrap(), 0); } @@ -7553,8 +7581,8 @@ pub(crate) mod tests { let vote_tx = &votes[0]; assert_eq!(vote_tx.message.recent_blockhash, bank1.last_blockhash()); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank1.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank1.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); @@ -7578,8 +7606,8 @@ pub(crate) mod tests { let votes = cluster_info.get_votes(&mut cursor); assert!(votes.is_empty()); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - bank1.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(bank1.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); @@ -7641,8 +7669,8 @@ pub(crate) mod tests { expired_bank.last_blockhash() ); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - expired_bank.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(expired_bank.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); @@ -7700,8 +7728,8 @@ pub(crate) mod tests { expired_bank.last_blockhash() ); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - expired_bank.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(expired_bank.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), 1); } @@ -7758,8 +7786,8 @@ pub(crate) mod tests { parent_bank.last_blockhash() ); assert_eq!( - tower.last_vote_tx_blockhash().unwrap(), - parent_bank.last_blockhash() + tower.last_vote_tx_blockhash(), + BlockhashStatus::Blockhash(parent_bank.last_blockhash()) ); assert_eq!(tower.last_voted_slot().unwrap(), parent_bank.slot()); let bank = new_bank_from_parent_with_bank_forks( diff --git a/core/src/rewards_recorder_service.rs b/core/src/rewards_recorder_service.rs index f78b8bab260b65..3fc2c8dc5b5149 100644 --- a/core/src/rewards_recorder_service.rs +++ b/core/src/rewards_recorder_service.rs @@ -1,8 +1,7 @@ use { 
crossbeam_channel::{Receiver, RecvTimeoutError, Sender}, - solana_accounts_db::stake_rewards::RewardInfo, solana_ledger::blockstore::Blockstore, - solana_sdk::{clock::Slot, pubkey::Pubkey}, + solana_sdk::{clock::Slot, pubkey::Pubkey, reward_info::RewardInfo}, solana_transaction_status::Reward, std::{ sync::{ diff --git a/core/src/validator.rs b/core/src/validator.rs index 11fecf9f1313b6..d4387320f6ca82 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -35,6 +35,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, hardened_unpack::{open_genesis_config, MAX_GENESIS_ARCHIVE_UNPACKED_SIZE}, + utils::{move_and_async_delete_path, move_and_async_delete_path_contents}, }, solana_client::connection_cache::{ConnectionCache, Protocol}, solana_entry::poh::compute_hash_time_ns, @@ -95,14 +96,11 @@ use { bank_forks::BankForks, commitment::BlockCommitmentCache, prioritization_fee_cache::PrioritizationFeeCache, - runtime_config::RuntimeConfig, snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, - snapshot_utils::{ - self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents, - }, + snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, }, solana_sdk::{ clock::Slot, @@ -117,6 +115,7 @@ use { }, solana_send_transaction_service::send_transaction_service, solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, + solana_svm::runtime_config::RuntimeConfig, solana_turbine::{self, broadcast_stage::BroadcastStageType}, solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::vote_state, @@ -210,7 +209,6 @@ pub struct ValidatorConfig { pub voting_disabled: bool, pub account_paths: Vec, pub account_snapshot_paths: Vec, - pub account_shrink_paths: Option>, pub rpc_config: JsonRpcConfig, /// Specifies which plugins to start up with 
pub on_start_geyser_plugin_config_files: Option>, @@ -282,7 +280,6 @@ impl Default for ValidatorConfig { max_ledger_shreds: None, account_paths: Vec::new(), account_snapshot_paths: Vec::new(), - account_shrink_paths: None, rpc_config: JsonRpcConfig::default(), on_start_geyser_plugin_config_files: None, rpc_addrs: None, @@ -630,7 +627,7 @@ impl Validator { ]; for old_accounts_hash_cache_dir in old_accounts_hash_cache_dirs { if old_accounts_hash_cache_dir.exists() { - snapshot_utils::move_and_async_delete_path(old_accounts_hash_cache_dir); + move_and_async_delete_path(old_accounts_hash_cache_dir); } } @@ -1853,7 +1850,6 @@ fn load_blockstore( &genesis_config, &blockstore, config.account_paths.clone(), - config.account_shrink_paths.clone(), Some(&config.snapshot_config), &process_options, transaction_history_services @@ -1880,11 +1876,6 @@ fn load_blockstore( let mut bank_forks = bank_forks.write().unwrap(); bank_forks.set_snapshot_config(Some(config.snapshot_config.clone())); bank_forks.set_accounts_hash_interval_slots(config.accounts_hash_interval_slots); - if let Some(ref shrink_paths) = config.account_shrink_paths { - bank_forks - .working_bank() - .set_shrink_paths(shrink_paths.clone()); - } } Ok(( @@ -2463,12 +2454,16 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo } fn cleanup_accounts_paths(config: &ValidatorConfig) { - for accounts_path in &config.account_paths { - move_and_async_delete_path_contents(accounts_path); + for account_path in &config.account_paths { + move_and_async_delete_path_contents(account_path); } - if let Some(ref shrink_paths) = config.account_shrink_paths { - for accounts_path in shrink_paths { - move_and_async_delete_path_contents(accounts_path); + if let Some(shrink_paths) = config + .accounts_db_config + .as_ref() + .and_then(|config| config.shrink_paths.as_ref()) + { + for shrink_path in shrink_paths { + move_and_async_delete_path_contents(shrink_path); } } } diff --git 
a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 106539034a2a81..b0dd111676af79 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -23,7 +23,6 @@ use { bank::{epoch_accounts_hash_utils, Bank}, bank_forks::BankForks, genesis_utils::{self, GenesisConfigInfo}, - runtime_config::RuntimeConfig, snapshot_archive_info::SnapshotArchiveInfoGetter, snapshot_bank_utils, snapshot_config::SnapshotConfig, @@ -39,6 +38,7 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, std::{ mem::ManuallyDrop, sync::{ diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index a44c63fec66da9..2694f7294a7217 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -25,7 +25,6 @@ use { bank::Bank, bank_forks::BankForks, genesis_utils::{create_genesis_config_with_leader, GenesisConfigInfo}, - runtime_config::RuntimeConfig, snapshot_archive_info::FullSnapshotArchiveInfo, snapshot_bank_utils::{self, DISABLED_SNAPSHOT_ARCHIVE_INTERVAL}, snapshot_config::SnapshotConfig, @@ -50,6 +49,7 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, fs, diff --git a/docs/README.md b/docs/README.md index ceff97a78db556..0e002b6ac75dcf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -63,8 +63,12 @@ npm run start ## Translations -Translations are sourced from [Crowdin](https://docusaurus.io/docs/i18n/crowdin) and generated when `master` is built. -For local development use the following two commands in this `docs` directory. +Translations are sourced from [Crowdin](https://docusaurus.io/docs/i18n/crowdin) +and generated when the branch noted as the `STABLE` channel is built via the +`build.sh` script. + +For local development, and with the `CROWDIN_PERSONAL_TOKEN` env variable set, +use the following two commands in this `docs` directory. 
To download the newest documentation translations run: @@ -72,12 +76,45 @@ To download the newest documentation translations run: npm run crowdin:download ``` -To upload changes from `src` & generate [explicit IDs](https://docusaurus.io/docs/markdown-features/headings#explicit-ids): +To upload changes from `src` & generate +[explicit IDs](https://docusaurus.io/docs/markdown-features/headings#explicit-ids): ```shell npm run crowdin:upload ``` +> Translations are only included when deploying the `STABLE` channel of the docs +> (via `build.sh`). Resulting in only the `docs.solanalabs.com` documentation +> site to include translated content. Therefore, the `edge` and `beta` docs +> sites are not expected to include translated content, even though the language +> selector will still be present. + +### Common issues + +#### `CROWDIN_PERSONAL_TOKEN` env variable + +The `crowdin.yml` file requires a `CROWDIN_PERSONAL_TOKEN` env variable to be +set with a valid Crowdin access token. + +For local development, you can store this in a `.env` file that the Crowdin CLI +will auto detect. + +For building and publishing via the GitHub actions, the `CROWDIN_PERSONAL_TOKEN` +secret must be set. + +#### Translation locale fails to build with `SyntaxError` + +Some translation locales may fail to build with a `SyntaxError` thrown by +Docusaurus due to how certain language symbols get parsed by Docusaurus while +generating the static version of the docs. + +> Note: When any locale fails to build, the entire docs build will fail +> resulting in the docs not being able to be deployed at all. + +There are several known locales that fail to build the current documentation. +They are listed in the commented out `localesNotBuilding` attribute in the +[`docusaurus.config.js`](https://github.com/solana-labs/solana/blob/master/docs/docusaurus.config.js) + ## CI Build Flow The docs are built and published in Travis CI with the `./build.sh` script. 
On each PR, the docs are built, but not published. diff --git a/docs/build-cli-usage.sh b/docs/build-cli-usage.sh index 0917cb4737af9f..8e6090474f10fc 100755 --- a/docs/build-cli-usage.sh +++ b/docs/build-cli-usage.sh @@ -58,6 +58,6 @@ in_subcommands=0 while read -r subcommand rest; do [[ $subcommand == "SUBCOMMANDS:" ]] && in_subcommands=1 && continue if ((in_subcommands)); then - section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')" "####" >> "$out" + section "$(cargo -q run -p solana-cli -- help "$subcommand" | sed -e 's|'"$HOME"'|~|g' -e 's/[[:space:]]\+$//')" "###" >> "$out" fi done <<<"$usage">>"$out" diff --git a/docs/build.sh b/docs/build.sh index 5cb2ee6eebf7a7..6269eabdbb78b0 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -6,12 +6,10 @@ cd "$(dirname "$0")" # shellcheck source=ci/env.sh source ../ci/env.sh -: "${rust_stable_docker_image:=}" # Pacify shellcheck - # shellcheck source=ci/rust-version.sh source ../ci/rust-version.sh -../ci/docker-run.sh "$rust_stable_docker_image" docs/build-cli-usage.sh -../ci/docker-run.sh "$rust_stable_docker_image" docs/convert-ascii-to-svg.sh +../ci/docker-run-default-image.sh docs/build-cli-usage.sh +../ci/docker-run-default-image.sh docs/convert-ascii-to-svg.sh ./set-solana-release-tag.sh # Get current channel diff --git a/docs/crowdin.yml b/docs/crowdin.yml index 4a14b0899569a3..a8d31e9e7ec099 100644 --- a/docs/crowdin.yml +++ b/docs/crowdin.yml @@ -4,13 +4,22 @@ base_url: 'https://solana.crowdin.com' preserve_hierarchy: true files: [ # JSON translation files - # { - # source: '/i18n/en/**/*', - # translation: '/i18n/%two_letters_code%/**/%original_file_name%', - #}, + { + source: '/i18n/en/**/*', + translation: '/i18n/%two_letters_code%/**/%original_file_name%', + }, # Docs Markdown files { source: '/src/**/*.md', translation: '/i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%', }, + { + source: '/src/**/*.mdx', + 
translation: '/i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%', + }, + # Custom sidebar category files + { + source: '/src/**/*.json', + translation: '/i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name%', + }, ] diff --git a/docs/package-lock.json b/docs/package-lock.json index 976f65828db4bf..13c82661487a82 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -8,7 +8,7 @@ "name": "solana-docs", "version": "0.0.0", "dependencies": { - "@crowdin/cli": "^3.6.1", + "@crowdin/cli": "^3.17.0", "@docusaurus/core": "^2.2.0", "@docusaurus/plugin-google-gtag": "^2.4.0", "@docusaurus/preset-classic": "^2.2.0", @@ -2029,12 +2029,15 @@ } }, "node_modules/@crowdin/cli": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.9.0.tgz", - "integrity": "sha512-4wQjqJZmU/mg3VYfRL6IYXw/pPAL9vdfW3QVSBovYA+bYaEt43ZuGsSrqeBGOhLehasWwRqklXWsl96gxQlLdw==", + "version": "3.17.0", + "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.17.0.tgz", + "integrity": "sha512-ipr5wyBvpVuJ/DtJgDqTJiECu7zsVn9DwyTdf+sa0ukksXyiX3+H6wPm4eefIfEVSEaM92Q572dJZ5OnIH/Sag==", "dependencies": { - "njre": "^0.2.0", - "shelljs": "^0.8.4" + "command-exists-promise": "^2.0.2", + "node-fetch": "2.6.7", + "shelljs": "^0.8.4", + "tar": "^4.4.8", + "yauzl": "^2.10.0" }, "bin": { "crowdin": "jdeploy-bundle/jdeploy.js" @@ -9570,20 +9573,6 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, - "node_modules/njre": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/njre/-/njre-0.2.0.tgz", - "integrity": "sha512-+Wq8R6VmjK+jI8a9NdzfU6Vh50r3tjsdvl5KJE1OyHeH8I/nx5Ptm12qpO3qNUbstXuZfBDgDL0qQZw9JyjhMw==", - "dependencies": { - "command-exists-promise": "^2.0.2", - "node-fetch": "^2.5.0", - "tar": "^4.4.8", - "yauzl": "^2.10.0" - }, - "engines": { - 
"node": ">=8" - } - }, "node_modules/no-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", @@ -15762,12 +15751,15 @@ "optional": true }, "@crowdin/cli": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.9.0.tgz", - "integrity": "sha512-4wQjqJZmU/mg3VYfRL6IYXw/pPAL9vdfW3QVSBovYA+bYaEt43ZuGsSrqeBGOhLehasWwRqklXWsl96gxQlLdw==", + "version": "3.17.0", + "resolved": "https://registry.npmjs.org/@crowdin/cli/-/cli-3.17.0.tgz", + "integrity": "sha512-ipr5wyBvpVuJ/DtJgDqTJiECu7zsVn9DwyTdf+sa0ukksXyiX3+H6wPm4eefIfEVSEaM92Q572dJZ5OnIH/Sag==", "requires": { - "njre": "^0.2.0", - "shelljs": "^0.8.4" + "command-exists-promise": "^2.0.2", + "node-fetch": "2.6.7", + "shelljs": "^0.8.4", + "tar": "^4.4.8", + "yauzl": "^2.10.0" } }, "@cspotcode/source-map-support": { @@ -21261,17 +21253,6 @@ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==" }, - "njre": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/njre/-/njre-0.2.0.tgz", - "integrity": "sha512-+Wq8R6VmjK+jI8a9NdzfU6Vh50r3tjsdvl5KJE1OyHeH8I/nx5Ptm12qpO3qNUbstXuZfBDgDL0qQZw9JyjhMw==", - "requires": { - "command-exists-promise": "^2.0.2", - "node-fetch": "^2.5.0", - "tar": "^4.4.8", - "yauzl": "^2.10.0" - } - }, "no-case": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz", diff --git a/docs/package.json b/docs/package.json index 7279aa3e160586..c449ca7316fe8d 100644 --- a/docs/package.json +++ b/docs/package.json @@ -5,6 +5,7 @@ "scripts": { "start": "docusaurus start", "build": "docusaurus build", + "serve": "docusaurus serve", "clear": "docusaurus clear", "help": "docusaurus --help", "swizzle": "docusaurus swizzle", @@ -20,7 +21,7 @@ "crowdin:upload": "npm run write-i18n && crowdin upload" }, "dependencies": { - "@crowdin/cli": "^3.6.1", + "@crowdin/cli": 
"^3.17.0", "@docusaurus/core": "^2.2.0", "@docusaurus/plugin-google-gtag": "^2.4.0", "@docusaurus/preset-classic": "^2.2.0", diff --git a/docs/publish-docs.sh b/docs/publish-docs.sh index 06c34134db7462..0cbedcf882001d 100755 --- a/docs/publish-docs.sh +++ b/docs/publish-docs.sh @@ -62,7 +62,7 @@ cat > "$CONFIG_FILE" < ``` -### Extend a program +## Extend a program If a program has already been deployed, and a redeployment goes beyond the `max_len` of the account, it's possible to extend the program to fit the larger @@ -121,7 +121,7 @@ redeployment: solana program extend ``` -### Resuming a failed deploy +## Resuming a failed deploy If program deployment fails, there will be a hanging intermediate buffer account that contains a non-zero balance. In order to recoup that balance you may resume @@ -159,7 +159,7 @@ Then issue a new `deploy` command and specify the buffer: solana program deploy --buffer ``` -### Closing program and buffer accounts, and reclaiming their lamports +## Closing program and buffer accounts, and reclaiming their lamports Both program and buffer accounts can be closed and their lamport balances transferred to a recipient's account. @@ -214,7 +214,7 @@ To show all buffer accounts regardless of the authority solana program show --buffers --all ``` -### Set a program's upgrade authority +## Set a program's upgrade authority The program's upgrade authority must be present to deploy a program. If no authority is specified during program deployment, the default keypair is used as @@ -245,7 +245,7 @@ they do not have access to. The `--skip-new-upgrade-authority-signer-check` option relaxes the signer check. This can be useful for situations where the new upgrade authority is an offline signer or a multisig. 
-### Immutable programs +## Immutable programs A program can be marked immutable, which prevents all further redeployments, by specifying the `--final` flag during deployment: @@ -260,7 +260,7 @@ Or anytime after: solana program set-upgrade-authority --final ``` -### Dumping a program to a file +## Dumping a program to a file The deployed program may be dumped back to a local file: @@ -283,7 +283,7 @@ $ truncate -r dump.so extended.so $ sha256sum extended.so dump.so ``` -### Using an intermediary Buffer account +## Using an intermediary Buffer account Instead of deploying directly to the program account, the program can be written to an intermediary buffer account. Intermediary accounts can be useful for @@ -328,7 +328,7 @@ account are refunded to a spill account. Buffers also support `show` and `dump` just like programs do. -### Upgrading program using offline signer as authority +## Upgrading program using offline signer as authority Some security models require separating the signing process from the transaction broadcast, such that the signing keys can be completely disconnected from any network, also known as [offline signing](offline-signing.md). 
diff --git a/docs/src/cli/examples/durable-nonce.md b/docs/src/cli/examples/durable-nonce.md index 7f0199b8d44fc5..11c90c3936b348 100644 --- a/docs/src/cli/examples/durable-nonce.md +++ b/docs/src/cli/examples/durable-nonce.md @@ -1,5 +1,7 @@ --- -title: Durable Transaction Nonces +title: Durable Transaction Nonces in the Solana CLI +pagination_label: "Solana CLI: Durable Transaction Nonces" +sidebar_label: Durable Transaction Nonces --- Durable transaction nonces are a mechanism for getting around the typical short diff --git a/docs/src/cli/examples/offline-signing.md b/docs/src/cli/examples/offline-signing.md index 8b9312853a9a11..28b54561732878 100644 --- a/docs/src/cli/examples/offline-signing.md +++ b/docs/src/cli/examples/offline-signing.md @@ -1,5 +1,7 @@ --- -title: Offline Transaction Signing +title: Offline Transaction Signing with the Solana CLI +pagination_label: "Solana CLI: Offline Transaction Signing" +sidebar_label: Offline Transaction Signing --- Some security models require keeping signing keys, and thus the signing diff --git a/docs/src/cli/examples/sign-offchain-message.md b/docs/src/cli/examples/sign-offchain-message.md index ae14119f7b91b9..578ba511eee0d6 100644 --- a/docs/src/cli/examples/sign-offchain-message.md +++ b/docs/src/cli/examples/sign-offchain-message.md @@ -1,5 +1,7 @@ --- -title: Off-Chain Message Signing +title: Off-Chain Message Signing with the Solana CLI +pagination_label: "Solana CLI: Off-Chain Message Signing" +sidebar_label: Off-Chain Message Signing --- Off-chain message signing is a method of signing non-transaction messages with diff --git a/docs/src/cli/examples/test-validator.md b/docs/src/cli/examples/test-validator.md index 70f050c77f5663..4641e36b55125e 100644 --- a/docs/src/cli/examples/test-validator.md +++ b/docs/src/cli/examples/test-validator.md @@ -1,5 +1,6 @@ --- title: Solana Test Validator +pagination_label: "Solana CLI: Test Validator" sidebar_label: Test Validator --- diff --git 
a/docs/src/cli/examples/transfer-tokens.md b/docs/src/cli/examples/transfer-tokens.md index 89374ebf43864a..28c933be6665de 100644 --- a/docs/src/cli/examples/transfer-tokens.md +++ b/docs/src/cli/examples/transfer-tokens.md @@ -1,5 +1,7 @@ --- -title: Send and Receive Tokens +title: Send and Receive Tokens with the Solana CLI +pagination_label: "Solana CLI: Send and Receive Tokens" +sidebar_label: Send and Receive Tokens --- This page describes how to receive and send SOL tokens using the command line diff --git a/docs/src/cli/index.md b/docs/src/cli/index.md index cdf1ed10f83d76..77574419618354 100644 --- a/docs/src/cli/index.md +++ b/docs/src/cli/index.md @@ -1,7 +1,8 @@ --- title: Solana CLI Tool Suite -sidebar_label: Overview sidebar_position: 0 +sidebar_label: Overview +pagination_label: Solana CLI Tool Suite --- In this section, we will describe how to use the Solana command-line tools to @@ -19,6 +20,6 @@ secure access to your Solana accounts. To get started using the Solana Command Line (CLI) tools: - [Install the Solana CLI Tool Suite](./install.md) -- [Choose a Cluster](./examples/choose-a-cluster.md) -- [Create a Wallet](./wallets/index.md) - [Introduction to our CLI conventions](./intro.md) +- [Create a Wallet using the CLI](./wallets/index.md) +- [Choose a Cluster to connect to using the CLI](./examples/choose-a-cluster.md) diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index 7773631dda59d3..3667c733e3f4d4 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -1,5 +1,6 @@ --- title: Install the Solana CLI +pagination_label: Install the Solana CLI sidebar_label: Installation sidebar_position: 1 --- diff --git a/docs/src/cli/intro.md b/docs/src/cli/intro.md index 1701450173e39a..436776ee718e14 100644 --- a/docs/src/cli/intro.md +++ b/docs/src/cli/intro.md @@ -1,5 +1,6 @@ --- title: Introduction to the Solana CLI +pagination_label: Introduction to the Solana CLI sidebar_label: Introduction sidebar_position: 2 --- @@ -45,7 
+46,7 @@ solana-keygen pubkey Below, we show how to resolve what you should put in `` depending on your wallet type. -#### Paper Wallet +## Paper Wallet In a paper wallet, the keypair is securely derived from the seed words and optional passphrase you entered when the wallet was created. To use a paper @@ -59,7 +60,7 @@ To display the wallet address of a Paper Wallet: solana-keygen pubkey prompt:// ``` -#### File System Wallet +## File System Wallet With a file system wallet, the keypair is stored in a file on your computer. Replace `` with the complete file path to the keypair file. @@ -71,7 +72,7 @@ For example, if the file system keypair file location is solana-keygen pubkey /home/solana/my_wallet.json ``` -#### Hardware Wallet +## Hardware Wallet If you chose a hardware wallet, use your [keypair URL](./wallets/hardware/index.md#specify-a-hardware-wallet-key), diff --git a/docs/src/cli/wallets/file-system.md b/docs/src/cli/wallets/file-system.md index 0041c51876b490..dd21203c4e1451 100644 --- a/docs/src/cli/wallets/file-system.md +++ b/docs/src/cli/wallets/file-system.md @@ -1,5 +1,7 @@ --- -title: File System Wallets +title: File System Wallets using the CLI +pagination_label: File System Wallets using the CLI +sidebar_label: File System Wallets sidebar_position: 2 --- diff --git a/docs/src/cli/wallets/hardware/index.md b/docs/src/cli/wallets/hardware/index.md index 9c8642cf34c6a1..30f53f86d3a3d8 100644 --- a/docs/src/cli/wallets/hardware/index.md +++ b/docs/src/cli/wallets/hardware/index.md @@ -1,5 +1,6 @@ --- title: Using Hardware Wallets in the Solana CLI +pagination_label: "Using Hardware Wallets in the Solana CLI" sidebar_label: Using in the Solana CLI sidebar_position: 0 --- diff --git a/docs/src/cli/wallets/hardware/ledger.md b/docs/src/cli/wallets/hardware/ledger.md index e0060aba803eb2..e5a45c63df07d8 100644 --- a/docs/src/cli/wallets/hardware/ledger.md +++ b/docs/src/cli/wallets/hardware/ledger.md @@ -1,5 +1,6 @@ --- title: Using Ledger Nano Hardware 
Wallets in the Solana CLI +pagination_label: "Hardware Wallets in the Solana CLI: Ledger Nano" sidebar_label: Ledger Nano --- diff --git a/docs/src/cli/wallets/index.md b/docs/src/cli/wallets/index.md index fcd907629c8d85..9643ef61ec13eb 100644 --- a/docs/src/cli/wallets/index.md +++ b/docs/src/cli/wallets/index.md @@ -1,5 +1,6 @@ --- -title: Command Line Wallets +title: Solana Wallets with the CLI +pagination_label: Command Line Wallets sidebar_label: Overview sidebar_position: 0 --- diff --git a/docs/src/cli/wallets/paper.md b/docs/src/cli/wallets/paper.md index 85c76779b852ed..4e3c3c39ac8732 100644 --- a/docs/src/cli/wallets/paper.md +++ b/docs/src/cli/wallets/paper.md @@ -1,5 +1,7 @@ --- -title: Paper Wallets +title: Paper Wallets using the Solana CLI +pagination_label: Paper Wallets using the CLI +sidebar_label: Paper Wallets sidebar_position: 1 --- diff --git a/docs/src/clusters/available.md b/docs/src/clusters/available.md index 7abfb06880e858..dfbca41672b499 100644 --- a/docs/src/clusters/available.md +++ b/docs/src/clusters/available.md @@ -1,5 +1,7 @@ --- -title: Solana Clusters +title: Available Solana Clusters +sidebar_label: Solana Clusters +pagination_label: Available Solana Clusters --- Solana maintains several different clusters with different purposes. diff --git a/docs/src/clusters/index.md b/docs/src/clusters/index.md index 8ac1dee11d6e68..e2d25c603b4388 100644 --- a/docs/src/clusters/index.md +++ b/docs/src/clusters/index.md @@ -1,7 +1,8 @@ --- -title: A Solana Cluster -sidebar_label: Overview +title: Overview of a Solana Cluster sidebar_position: 0 +sidebar_label: Overview +pagination_label: Overview of a Solana Cluster --- A Solana cluster is a set of validators working together to serve client transactions and maintain the integrity of the ledger. Many clusters may coexist. When two clusters share a common genesis block, they attempt to converge. Otherwise, they simply ignore the existence of the other. 
Transactions sent to the wrong one are quietly rejected. In this section, we'll discuss how a cluster is created, how nodes join the cluster, how they share the ledger, how they ensure the ledger is replicated, and how they cope with buggy and malicious nodes. diff --git a/docs/src/clusters/metrics.md b/docs/src/clusters/metrics.md index 575c46a26e0019..c162ed50117ca4 100644 --- a/docs/src/clusters/metrics.md +++ b/docs/src/clusters/metrics.md @@ -1,5 +1,7 @@ --- -title: Performance Metrics +title: Solana Cluster Performance Metrics +sidebar_label: Performance Metrics +pagination_label: Cluster Performance Metrics --- Solana cluster performance is measured as average number of transactions per second that the network can sustain \(TPS\). And, how long it takes for a transaction to be confirmed by super majority of the cluster \(Confirmation Time\). diff --git a/docs/src/consensus/commitments.md b/docs/src/consensus/commitments.md index 0bfb55e9237a34..404f41dc7f5f71 100644 --- a/docs/src/consensus/commitments.md +++ b/docs/src/consensus/commitments.md @@ -1,5 +1,7 @@ --- -title: Commitment Status +title: Solana Commitment Status +sidebar_label: Commitment Status +pagination_label: Consensus Commitment Status description: "Processed, confirmed, and finalized. Learn the differences between the different commitment statuses on the Solana blockchain." diff --git a/docs/src/consensus/leader-rotation.md b/docs/src/consensus/leader-rotation.md index a52cbb7eafc465..c65d91c7306176 100644 --- a/docs/src/consensus/leader-rotation.md +++ b/docs/src/consensus/leader-rotation.md @@ -1,5 +1,7 @@ --- -title: Leader Rotation +title: Solana Leader Rotation +sidebar_label: Leader Rotation +pagination_label: Leader Rotation --- At any given moment, a cluster expects only one validator to produce ledger entries. By having only one leader at a time, all validators are able to replay identical copies of the ledger. 
The drawback of only one leader at a time, however, is that a malicious leader is capable of censoring votes and transactions. Since censoring cannot be distinguished from the network dropping packets, the cluster cannot simply elect a single node to hold the leader role indefinitely. Instead, the cluster minimizes the influence of a malicious leader by rotating which node takes the lead. diff --git a/docs/src/index.mdx b/docs/src/index.mdx index 422404b0a7379b..b7a098ea747132 100644 --- a/docs/src/index.mdx +++ b/docs/src/index.mdx @@ -3,10 +3,10 @@ slug: / id: home title: Home sidebar_label: Home +pagination_label: Solana Validator Documentation Home description: "Solana is a high performance network that is utilized for a range of use cases, \ including finance, NFTs, payments, and gaming." -# displayed_sidebar: introductionSidebar --- # Solana Validator Documentation @@ -55,6 +55,6 @@ Explore what it takes to operate a Solana validator and help secure the network. ## Learn more -import HomeCtaLinks from "../components/HomeCtaLinks"; +import HomeCtaLinks from "@site/components/HomeCtaLinks"; diff --git a/docs/src/operations/_category_.json b/docs/src/operations/_category_.json index a32cdd91fe0f18..289f63ff9507ea 100644 --- a/docs/src/operations/_category_.json +++ b/docs/src/operations/_category_.json @@ -2,9 +2,5 @@ "position": 4, "label": "Operating a Validator", "collapsible": true, - "collapsed": true, - "link": { - "type": "doc", - "id": "operations/index" - } + "collapsed": true } diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md index 3bd0f906f729a7..29ef42c81b7f5f 100644 --- a/docs/src/operations/best-practices/general.md +++ b/docs/src/operations/best-practices/general.md @@ -1,6 +1,7 @@ --- -title: Validator Operations Best Practices +title: Solana Validator Operations Best Practices sidebar_label: General Operations +pagination_label: "Best Practices: Validator Operations" --- After you have 
successfully setup and started a diff --git a/docs/src/operations/best-practices/monitoring.md b/docs/src/operations/best-practices/monitoring.md index b866a88b869531..6d04fc38487be7 100644 --- a/docs/src/operations/best-practices/monitoring.md +++ b/docs/src/operations/best-practices/monitoring.md @@ -1,6 +1,7 @@ --- -title: Validator Monitoring Best Practices +title: Solana Validator Monitoring Best Practices sidebar_label: Monitoring +pagination_label: "Best Practices: Validator Monitoring" --- It is essential that you have monitoring in place on your validator. In the event that your validator is delinquent (behind the rest of the network) you want to respond immediately to fix the issue. One very useful tool to monitor your validator is [`solana-watchtower`](#solana-watchtower). diff --git a/docs/src/operations/best-practices/security.md b/docs/src/operations/best-practices/security.md index d53491c115ae9f..fab46b665ad7fa 100644 --- a/docs/src/operations/best-practices/security.md +++ b/docs/src/operations/best-practices/security.md @@ -1,6 +1,7 @@ --- -title: Validator Security Best Practices +title: Solana Validator Security Best Practices sidebar_label: Security +pagination_label: "Best Practices: Validator Security" --- Being a system administrator for an Ubuntu computer requires technical knowledge of the system and best security practices. The following list should help you get started and is considered the bare minimum for keeping your system safe. 
diff --git a/docs/src/operations/guides/restart-cluster.md b/docs/src/operations/guides/restart-cluster.md index 4039f69a6b468f..85d4731d604c65 100644 --- a/docs/src/operations/guides/restart-cluster.md +++ b/docs/src/operations/guides/restart-cluster.md @@ -1,4 +1,10 @@ -## Restarting a cluster +--- +title: "Restarting a Solana Cluster" +# really high number to ensure it is listed last in the sidebar +sidebar_position: 999 +sidebar_label: Restart a Cluster +pagination_label: "Validator Guides: Restart a Cluster" +--- ### Step 1. Identify the latest optimistically confirmed slot for the cluster diff --git a/docs/src/operations/guides/validator-failover.md b/docs/src/operations/guides/validator-failover.md index 34968b73640933..168a1a4312cec0 100644 --- a/docs/src/operations/guides/validator-failover.md +++ b/docs/src/operations/guides/validator-failover.md @@ -1,5 +1,8 @@ --- -title: Failover Setup +title: "Validator Guide: Setup Node Failover" +sidebar_position: 9 +sidebar_label: Node Failover +pagination_label: "Validator Guides: Node Failover" --- A simple two machine instance failover method is described here, which allows you to: diff --git a/docs/src/operations/guides/validator-info.md b/docs/src/operations/guides/validator-info.md index 5b232ba02d0b32..56b74f732c4232 100644 --- a/docs/src/operations/guides/validator-info.md +++ b/docs/src/operations/guides/validator-info.md @@ -1,5 +1,8 @@ --- -title: Publishing Validator Info +title: "Validator Guide: Publishing Validator Info" +sidebar_position: 1 +sidebar_label: Publishing Validator Info +pagination_label: "Validator Guides: Publishing Validator Info" --- You can publish your validator information to the chain to be publicly visible to other users. 
diff --git a/docs/src/operations/guides/validator-monitor.md b/docs/src/operations/guides/validator-monitor.md index ef187271917302..5e314c52e82b20 100644 --- a/docs/src/operations/guides/validator-monitor.md +++ b/docs/src/operations/guides/validator-monitor.md @@ -1,5 +1,8 @@ --- -title: Monitoring a Validator +title: "Validator Guide: Monitoring a Validator" +sidebar_position: 2 +sidebar_label: Monitoring a Validator +pagination_label: "Validator Guides: Monitoring a Validator" --- ## Check Gossip diff --git a/docs/src/operations/guides/validator-stake.md b/docs/src/operations/guides/validator-stake.md index 85da5c3380316a..da43c3071d4fb7 100644 --- a/docs/src/operations/guides/validator-stake.md +++ b/docs/src/operations/guides/validator-stake.md @@ -1,5 +1,8 @@ --- -title: Staking +title: "Validator Guide: Staking" +sidebar_position: 3 +sidebar_label: Staking +pagination_label: "Validator Guides: Staking" --- **By default your validator will have no stake.** This means it will be diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md index 69cef1315c05b8..378783798b3ce8 100644 --- a/docs/src/operations/guides/validator-start.md +++ b/docs/src/operations/guides/validator-start.md @@ -1,5 +1,8 @@ --- -title: Starting a Validator +title: "Validator Guide: Starting a Validator" +sidebar_position: 0 +sidebar_label: Starting a Validator +pagination_label: "Validator Guides: Starting a Validator" --- ## Configure Solana CLI diff --git a/docs/src/operations/guides/validator-troubleshoot.md b/docs/src/operations/guides/validator-troubleshoot.md index abf8d8f442c33a..17ae09cb4d6510 100644 --- a/docs/src/operations/guides/validator-troubleshoot.md +++ b/docs/src/operations/guides/validator-troubleshoot.md @@ -1,5 +1,8 @@ --- -title: Troubleshooting +title: "Validator Guide: Troubleshooting" +sidebar_position: 4 +sidebar_label: Troubleshooting +pagination_label: "Validator Guides: Troubleshooting" --- There is a 
`#validator-support` Discord channel available to reach other diff --git a/docs/src/operations/guides/vote-accounts.md b/docs/src/operations/guides/vote-accounts.md index c86b66cb85bba4..b962b1a1dffa43 100644 --- a/docs/src/operations/guides/vote-accounts.md +++ b/docs/src/operations/guides/vote-accounts.md @@ -1,5 +1,8 @@ --- -title: Vote Account Management +title: "Validator Guide: Vote Account Management" +sidebar_position: 5 +sidebar_label: Vote Account Management +pagination_label: "Validator Guides: Vote Account Management" --- This page describes how to set up an on-chain _vote account_. Creating a vote diff --git a/docs/src/operations/prerequisites.md b/docs/src/operations/prerequisites.md index c44c15fc205300..fb37d9ec4de3ff 100644 --- a/docs/src/operations/prerequisites.md +++ b/docs/src/operations/prerequisites.md @@ -1,7 +1,8 @@ --- title: Solana Validator Prerequisites -sidebar_label: Prerequisites sidebar_position: 2 +sidebar_label: Prerequisites +pagination_label: Prerequisites to run a Validator --- Operating a Solana validator is an interesting and rewarding task. Generally speaking, it requires someone with a technical background but also involves community engagement and marketing. diff --git a/docs/src/operations/requirements.md b/docs/src/operations/requirements.md index 8c9e8d62cb5a08..2c9cf576e1fcf9 100644 --- a/docs/src/operations/requirements.md +++ b/docs/src/operations/requirements.md @@ -1,7 +1,8 @@ --- -title: Validator Requirements -sidebar_label: Requirements +title: Solana Validator Requirements sidebar_position: 3 +sidebar_label: Requirements +pagination_label: Requirements to Operate a Validator --- ## Minimum SOL requirements diff --git a/docs/src/operations/validator-or-rpc-node.md b/docs/src/operations/validator-or-rpc-node.md index c07c5201f100f9..ca4ded555972c2 100644 --- a/docs/src/operations/validator-or-rpc-node.md +++ b/docs/src/operations/validator-or-rpc-node.md @@ -1,7 +1,8 @@ --- title: Consensus Validator or RPC Node? 
-sidebar_label: Validator vs RPC Node sidebar_position: 1 +sidebar_label: Validator vs RPC Node +pagination_label: Consensus Validator vs RPC Node --- Operators who run a [consensus validator](../what-is-a-validator.md) have much diff --git a/docs/src/runtime/programs.md b/docs/src/runtime/programs.md index ae6b0127a2e490..018169ee1c68f9 100644 --- a/docs/src/runtime/programs.md +++ b/docs/src/runtime/programs.md @@ -1,5 +1,7 @@ --- -title: "Native Programs" +title: "Native Programs in the Solana Runtime" +pagination_label: Runtime Native Programs +sidebar_label: Native Programs --- Solana contains a small handful of native programs, which are required to run diff --git a/docs/src/runtime/sysvars.md b/docs/src/runtime/sysvars.md index 99d271f0a3c056..36c00747bfaa03 100644 --- a/docs/src/runtime/sysvars.md +++ b/docs/src/runtime/sysvars.md @@ -1,5 +1,7 @@ --- -title: Sysvar Cluster Data +title: Solana Sysvar Cluster Data +pagination_label: Runtime Sysvar Cluster Data +sidebar_label: Sysvar Cluster Data --- Solana exposes a variety of cluster state data to programs via diff --git a/docs/src/runtime/zk-token-proof.md b/docs/src/runtime/zk-token-proof.md index 4127409eeb00fa..35384f17c9396b 100644 --- a/docs/src/runtime/zk-token-proof.md +++ b/docs/src/runtime/zk-token-proof.md @@ -1,5 +1,7 @@ --- -title: ZK Token Proof Program +title: Solana ZK Token Proof Program +pagination_label: Native ZK Token Proof Program +sidebar_label: ZK Token Proof Program --- The native Solana ZK Token proof program verifies a number of zero-knowledge @@ -39,7 +41,8 @@ cannot change the original value that is contained in a commitment. Interested readers can refer to the following resources for a more in-depth treatment of Pedersen commitment and the (twisted) ElGamal encryption schemes. 
-- [Notes](./zk-docs/twisted_elgamal.pdf) on the twisted ElGamal encryption +- [Notes](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/twisted_elgamal.pdf) + on the twisted ElGamal encryption - A technical [overview](https://github.com/solana-labs/solana-program-library/blob/master/token/zk-token-protocol-paper/part1.pdf) of the SPL Token 2022 confidential extension @@ -96,14 +99,14 @@ The ZK Token proof program supports the following list of zero-knowledge proofs. - The ElGamal public-key validity proof instruction certifies that an ElGamal public-key is a properly formed public key. - Mathematical description and proof of security: - [[Notes]](./zk-docs/pubkey_proof.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/pubkey_proof.pdf) - `VerifyZeroBalance`: - The zero-balance proof certifies that an ElGamal ciphertext encrypts the number zero. - Mathematical description and proof of security: - [[Notes]](./zk-docs/zero_proof.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/zero_proof.pdf) #### Equality proofs @@ -112,11 +115,11 @@ The ZK Token proof program supports the following list of zero-knowledge proofs. - The ciphertext-commitment equality proof certifies that an ElGamal ciphertext and a Pedersen commitment encode the same message. - Mathematical description and proof of security: - [[Notes]](./zk-docs/ciphertext_commitment_equality.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/ciphertext_commitment_equality.pdf) - `VerifyCiphertextCiphertextEquality`: - The ciphertext-ciphertext equality proof certifies that two ElGamal ciphertexts encrypt the same message. 
- Mathematical description and proof of security: - [[Notes]](./zk-docs/ciphertext_ciphertext_equality.pdf) + [[Notes]](https://github.com/solana-labs/solana/blob/master/docs/src/runtime/zk-docs/ciphertext_ciphertext_equality.pdf) diff --git a/docs/src/validator/anatomy.md b/docs/src/validator/anatomy.md index 5a61eeff7ef11c..465b08d3fa904d 100644 --- a/docs/src/validator/anatomy.md +++ b/docs/src/validator/anatomy.md @@ -1,7 +1,8 @@ --- title: Anatomy of a Validator -sidebar_label: Anatomy sidebar_position: 1 +sidebar_label: Anatomy +pagination_label: Anatomy of a Validator --- ![Validator block diagrams](/img/validator.svg) diff --git a/docs/src/validator/blockstore.md b/docs/src/validator/blockstore.md index e49e576bf26353..71d41d8a39275d 100644 --- a/docs/src/validator/blockstore.md +++ b/docs/src/validator/blockstore.md @@ -1,6 +1,8 @@ --- -title: Blockstore +title: Blockstore in a Solana Validator sidebar_position: 3 +sidebar_label: Blockstore +pagination_label: Validator Blockstore --- After a block reaches finality, all blocks from that one on down to the genesis block form a linear chain with the familiar name blockchain. Until that point, however, the validator must maintain all potentially valid chains, called _forks_. The process by which forks naturally form as a result of leader rotation is described in [fork generation](../consensus/fork-generation.md). The _blockstore_ data structure described here is how a validator copes with those forks until blocks are finalized. 
diff --git a/docs/src/validator/geyser.md b/docs/src/validator/geyser.md index 3ea07473a61f88..a8a29d10dd022a 100644 --- a/docs/src/validator/geyser.md +++ b/docs/src/validator/geyser.md @@ -1,5 +1,7 @@ --- -title: Geyser Plugins +title: Solana Validator Geyser Plugins +sidebar_label: Geyser Plugins +pagination_label: Validator Geyser Plugins --- ## Overview diff --git a/docs/src/validator/gossip.md b/docs/src/validator/gossip.md index 3c637f5c707357..f0a2e43f511414 100644 --- a/docs/src/validator/gossip.md +++ b/docs/src/validator/gossip.md @@ -1,6 +1,8 @@ --- -title: Gossip Service +title: Gossip Service in a Solana Validator sidebar_position: 5 +sidebar_label: Gossip Service +pagination_label: Validator Gossip Service --- The Gossip Service acts as a gateway to nodes in the diff --git a/docs/src/validator/runtime.md b/docs/src/validator/runtime.md index 2bf8a52563f88b..a9afba3c056b31 100644 --- a/docs/src/validator/runtime.md +++ b/docs/src/validator/runtime.md @@ -1,6 +1,8 @@ --- -title: Runtime +title: Solana Runtime on a Solana Validator sidebar_position: 6 +sidebar_label: Runtime +pagination_label: Validator Runtime --- The runtime is a concurrent transaction processor. Transactions specify their data dependencies upfront and dynamic memory allocation is explicit. By separating program code from the state it operates on, the runtime is able to choreograph concurrent access. Transactions accessing only read-only accounts are executed in parallel whereas transactions accessing writable accounts are serialized. The runtime interacts with the program through an entrypoint with a well-defined interface. The data stored in an account is an opaque type, an array of bytes. The program has full control over its contents. 
diff --git a/docs/src/validator/tpu.md b/docs/src/validator/tpu.md index 0082902078d76e..7585911bf20179 100644 --- a/docs/src/validator/tpu.md +++ b/docs/src/validator/tpu.md @@ -1,7 +1,8 @@ --- -title: Transaction Processing Unit -sidebar_label: TPU +title: Transaction Processing Unit in a Solana Validator sidebar_position: 2 +sidebar_label: TPU +pagination_label: Validator's Transaction Processing Unit (TPU) --- TPU (Transaction Processing Unit) is the logic of the validator diff --git a/docs/src/validator/tvu.md b/docs/src/validator/tvu.md index e3ac7776f0d128..362b6bae165975 100644 --- a/docs/src/validator/tvu.md +++ b/docs/src/validator/tvu.md @@ -1,7 +1,8 @@ --- -title: Transaction Validation Unit -sidebar_label: TVU +title: Transaction Validation Unit in a Solana Validator sidebar_position: 3 +sidebar_label: TVU +pagination_label: Validator's Transaction Validation Unit (TVU) --- TVU (Transaction Validation Unit) is the logic of the validator diff --git a/geyser-plugin-manager/src/block_metadata_notifier.rs b/geyser-plugin-manager/src/block_metadata_notifier.rs index ab56cf3be81701..76d203c5e0ed44 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier.rs +++ b/geyser-plugin-manager/src/block_metadata_notifier.rs @@ -4,13 +4,12 @@ use { geyser_plugin_manager::GeyserPluginManager, }, log::*, - solana_accounts_db::stake_rewards::RewardInfo, solana_geyser_plugin_interface::geyser_plugin_interface::{ ReplicaBlockInfoV3, ReplicaBlockInfoVersions, }, solana_measure::measure::Measure, solana_metrics::*, - solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey}, + solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo}, solana_transaction_status::{Reward, Rewards}, std::sync::{Arc, RwLock}, }; diff --git a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs index 465f700efe3275..bb0ffe4c7f7513 100644 --- a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs 
+++ b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs @@ -1,6 +1,5 @@ use { - solana_accounts_db::stake_rewards::RewardInfo, - solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey}, + solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo}, std::sync::{Arc, RwLock}, }; diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 21f430daff9781..32d57a4c2f1333 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -488,7 +488,8 @@ fn do_main(matches: &ArgMatches) -> Result<(), Box> { let derivation_path = acquire_derivation_path(matches)?; let mnemonic = Mnemonic::new(mnemonic_type, language); - let (passphrase, passphrase_message) = acquire_passphrase_and_message(matches).unwrap(); + let (passphrase, passphrase_message) = acquire_passphrase_and_message(matches) + .map_err(|err| format!("Unable to acquire passphrase: {err}"))?; let seed = Seed::new(&mnemonic, &passphrase); let keypair = match derivation_path { diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index ddc1ca9b564e94..6da42940a4ba7f 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -44,6 +44,7 @@ solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-transaction-status = { workspace = true } solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 1c6f9744437555..80ea6f9715bf35 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -12,8 +12,8 @@ use { blockstore_processor::ProcessOptions, use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup}, }, - solana_runtime::runtime_config::RuntimeConfig, solana_sdk::clock::Slot, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, path::{Path, PathBuf}, diff --git 
a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index bcf87e826ec72e..2663a205fb5f37 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -4,7 +4,8 @@ use { crossbeam_channel::unbounded, log::*, solana_accounts_db::{ - hardened_unpack::open_genesis_config, utils::create_all_accounts_run_and_snapshot_dirs, + hardened_unpack::open_genesis_config, + utils::{create_all_accounts_run_and_snapshot_dirs, move_and_async_delete_path_contents}, }, solana_core::{ accounts_hash_verifier::AccountsHashVerifier, validator::BlockVerificationMethod, @@ -35,9 +36,7 @@ use { prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, - snapshot_utils::{ - self, clean_orphaned_account_snapshot_dirs, move_and_async_delete_path_contents, - }, + snapshot_utils::{self, clean_orphaned_account_snapshot_dirs}, }, solana_sdk::{ clock::Slot, genesis_config::GenesisConfig, pubkey::Pubkey, @@ -274,7 +273,6 @@ pub fn load_and_process_ledger( genesis_config, blockstore.as_ref(), account_paths, - None, snapshot_config.as_ref(), &process_options, None, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index d4a5a3eb18ea69..500a64173a25c4 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -6,7 +6,9 @@ use { blockstore::*, ledger_path::*, ledger_utils::*, - output::{output_account, AccountsOutputConfig, AccountsOutputStreamer}, + output::{ + output_account, AccountsOutputConfig, AccountsOutputMode, AccountsOutputStreamer, + }, program::*, }, clap::{ @@ -1312,6 +1314,7 @@ fn main() { .arg(&geyser_plugin_args) .arg(&accounts_data_encoding_arg) .arg(&use_snapshot_archives_at_startup) + .arg(&max_genesis_archive_unpacked_size_arg) .arg( Arg::with_name("include_sysvars") .long("include-sysvars") @@ -1333,7 +1336,27 @@ fn main() { .takes_value(false) .help("Do not print account data when printing account contents."), ) - 
.arg(&max_genesis_archive_unpacked_size_arg), + .arg( + Arg::with_name("account") + .long("account") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help( + "Limit output to accounts corresponding to the specified pubkey(s), \ + may be specified multiple times", + ), + ) + .arg( + Arg::with_name("program_accounts") + .long("program-accounts") + .takes_value(true) + .value_name("PUBKEY") + .validator(is_pubkey) + .conflicts_with("account") + .help("Limit output to accounts owned by the provided program pubkey"), + ), ) .subcommand( SubCommand::with_name("capitalization") @@ -2179,7 +2202,18 @@ fn main() { let include_account_contents = !arg_matches.is_present("no_account_contents"); let include_account_data = !arg_matches.is_present("no_account_data"); let account_data_encoding = parse_encoding_format(arg_matches); + let mode = if let Some(pubkeys) = pubkeys_of(arg_matches, "account") { + info!("Scanning individual accounts: {pubkeys:?}"); + AccountsOutputMode::Individual(pubkeys) + } else if let Some(pubkey) = pubkey_of(arg_matches, "program_accounts") { + info!("Scanning program accounts for {pubkey}"); + AccountsOutputMode::Program(pubkey) + } else { + info!("Scanning all accounts"); + AccountsOutputMode::All + }; let config = AccountsOutputConfig { + mode, include_sysvars, include_account_contents, include_account_data, diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index e21676771d598f..2de702ef44ce5d 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -6,6 +6,7 @@ use { Deserialize, Serialize, }, solana_account_decoder::{UiAccount, UiAccountData, UiAccountEncoding}, + solana_accounts_db::accounts_index::ScanConfig, solana_cli_output::{ display::writeln_transaction, CliAccount, CliAccountNewConfig, OutputFormat, QuietDisplay, VerboseDisplay, @@ -572,7 +573,14 @@ pub struct AccountsOutputStreamer { output_format: OutputFormat, } +pub enum AccountsOutputMode { + All, + 
Individual(Vec), + Program(Pubkey), +} + pub struct AccountsOutputConfig { + pub mode: AccountsOutputMode, pub include_sysvars: bool, pub include_account_contents: bool, pub include_account_data: bool, @@ -608,7 +616,10 @@ impl AccountsOutputStreamer { .serialize_field("summary", &*self.total_accounts_stats.borrow()) .map_err(|err| format!("unable to serialize accounts summary: {err}"))?; SerializeStruct::end(struct_serializer) - .map_err(|err| format!("unable to end serialization: {err}")) + .map_err(|err| format!("unable to end serialization: {err}"))?; + // The serializer doesn't give us a trailing newline so do it ourselves + println!(); + Ok(()) } _ => { // The compiler needs a placeholder type to satisfy the generic @@ -637,6 +648,33 @@ impl AccountsScanner { && (self.config.include_sysvars || !solana_sdk::sysvar::is_sysvar_id(pubkey)) } + fn maybe_output_account( + &self, + seq_serializer: &mut Option, + pubkey: &Pubkey, + account: &AccountSharedData, + slot: Option, + cli_account_new_config: &CliAccountNewConfig, + ) where + S: SerializeSeq, + { + if self.config.include_account_contents { + if let Some(serializer) = seq_serializer { + let cli_account = + CliAccount::new_with_config(pubkey, account, cli_account_new_config); + serializer.serialize_element(&cli_account).unwrap(); + } else { + output_account( + pubkey, + account, + slot, + self.config.include_account_data, + self.config.account_data_encoding, + ); + } + } + } + pub fn output(&self, seq_serializer: &mut Option) where S: SerializeSeq, @@ -654,26 +692,53 @@ impl AccountsScanner { .filter(|(pubkey, account, _)| self.should_process_account(account, pubkey)) { total_accounts_stats.accumulate_account(pubkey, &account, rent_collector); - - if self.config.include_account_contents { - if let Some(serializer) = seq_serializer { - let cli_account = - CliAccount::new_with_config(pubkey, &account, &cli_account_new_config); - serializer.serialize_element(&cli_account).unwrap(); - } else { - output_account( - 
pubkey, - &account, - Some(slot), - self.config.include_account_data, - self.config.account_data_encoding, - ); - } - } + self.maybe_output_account( + seq_serializer, + pubkey, + &account, + Some(slot), + &cli_account_new_config, + ); } }; - self.bank.scan_all_accounts(scan_func).unwrap(); + match &self.config.mode { + AccountsOutputMode::All => { + self.bank.scan_all_accounts(scan_func).unwrap(); + } + AccountsOutputMode::Individual(pubkeys) => pubkeys.iter().for_each(|pubkey| { + if let Some((account, slot)) = self + .bank + .get_account_modified_slot_with_fixed_root(pubkey) + .filter(|(account, _)| self.should_process_account(account, pubkey)) + { + total_accounts_stats.accumulate_account(pubkey, &account, rent_collector); + self.maybe_output_account( + seq_serializer, + pubkey, + &account, + Some(slot), + &cli_account_new_config, + ); + } + }), + AccountsOutputMode::Program(program_pubkey) => self + .bank + .get_program_accounts(program_pubkey, &ScanConfig::default()) + .unwrap() + .iter() + .filter(|(pubkey, account)| self.should_process_account(account, pubkey)) + .for_each(|(pubkey, account)| { + total_accounts_stats.accumulate_account(pubkey, account, rent_collector); + self.maybe_output_account( + seq_serializer, + pubkey, + account, + None, + &cli_account_new_config, + ); + }), + } } } diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 87ba0c39235a12..7665428981ed82 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -54,6 +54,7 @@ solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-storage-bigtable = { workspace = true } solana-storage-proto = { workspace = true } +solana-svm = { workspace = true } solana-transaction-status = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index 993f6d2c2f7645..48c03e1e6cc8e5 100644 --- a/ledger/src/bank_forks_utils.rs +++ 
b/ledger/src/bank_forks_utils.rs @@ -80,7 +80,6 @@ pub fn load( genesis_config: &GenesisConfig, blockstore: &Blockstore, account_paths: Vec, - shrink_paths: Option>, snapshot_config: Option<&SnapshotConfig>, process_options: ProcessOptions, transaction_status_sender: Option<&TransactionStatusSender>, @@ -93,7 +92,6 @@ pub fn load( genesis_config, blockstore, account_paths, - shrink_paths, snapshot_config, &process_options, cache_block_meta_sender, @@ -121,7 +119,6 @@ pub fn load_bank_forks( genesis_config: &GenesisConfig, blockstore: &Blockstore, account_paths: Vec, - shrink_paths: Option>, snapshot_config: Option<&SnapshotConfig>, process_options: &ProcessOptions, cache_block_meta_sender: Option<&CacheBlockMetaSender>, @@ -181,7 +178,6 @@ pub fn load_bank_forks( incremental_snapshot_archive_info, genesis_config, account_paths, - shrink_paths, snapshot_config, process_options, accounts_update_notifier, @@ -231,7 +227,6 @@ fn bank_forks_from_snapshot( incremental_snapshot_archive_info: Option, genesis_config: &GenesisConfig, account_paths: Vec, - shrink_paths: Option>, snapshot_config: &SnapshotConfig, process_options: &ProcessOptions, accounts_update_notifier: Option, @@ -345,10 +340,6 @@ fn bank_forks_from_snapshot( bank }; - if let Some(shrink_paths) = shrink_paths { - bank.set_shrink_paths(shrink_paths); - } - let full_snapshot_hash = FullSnapshotHash(( full_snapshot_archive_info.slot(), *full_snapshot_archive_info.hash(), diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 4fa5fa6f3aa808..b450caaa54577b 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -20,7 +20,6 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, epoch_accounts_hash::EpochAccountsHash, - rent_debits::RentDebits, transaction_results::{ TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, }, @@ -41,7 +40,6 @@ use { 
commitment::VOTE_THRESHOLD_SIZE, installed_scheduler_pool::BankWithScheduler, prioritization_fee_cache::PrioritizationFeeCache, - runtime_config::RuntimeConfig, transaction_batch::TransactionBatch, }, solana_sdk::{ @@ -50,6 +48,7 @@ use { genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, + rent_debits::RentDebits, saturating_add_assign, signature::{Keypair, Signature}, timing, @@ -58,6 +57,7 @@ use { VersionedTransaction, }, }, + solana_svm::runtime_config::RuntimeConfig, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::{vote_account::VoteAccountsHashMap, vote_sender_types::ReplayVoteSender}, std::{ @@ -745,7 +745,6 @@ pub fn test_process_blockstore( blockstore, Vec::new(), None, - None, opts, None, None, diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 54c27e237da980..e3c896f71befa8 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -692,7 +692,7 @@ pub mod layout { Ok(flags & ShredFlags::SHRED_TICK_REFERENCE_MASK.bits()) } - pub(crate) fn get_merkle_root(shred: &[u8]) -> Option { + pub fn get_merkle_root(shred: &[u8]) -> Option { match get_shred_variant(shred).ok()? { ShredVariant::LegacyCode | ShredVariant::LegacyData => None, ShredVariant::MerkleCode(proof_size, chained) => { diff --git a/ledger/src/shred/stats.rs b/ledger/src/shred/stats.rs index 5b4a75a2489bbb..60dfa9a79859c2 100644 --- a/ledger/src/shred/stats.rs +++ b/ledger/src/shred/stats.rs @@ -23,6 +23,8 @@ pub struct ProcessShredsStats { num_data_shreds_hist: [usize; 5], // If the blockstore already has shreds for the broadcast slot. pub num_extant_slots: u64, + // When looking up chained merkle root from parent slot fails. 
+ pub err_unknown_chained_merkle_root: u64, pub(crate) data_buffer_residual: usize, pub num_merkle_data_shreds: usize, pub num_merkle_coding_shreds: usize, @@ -89,6 +91,11 @@ impl ProcessShredsStats { ("sign_coding_time", self.sign_coding_elapsed, i64), ("coding_send_time", self.coding_send_elapsed, i64), ("num_extant_slots", self.num_extant_slots, i64), + ( + "err_unknown_chained_merkle_root", + self.err_unknown_chained_merkle_root, + i64 + ), ("data_buffer_residual", self.data_buffer_residual, i64), ("num_data_shreds_07", self.num_data_shreds_hist[0], i64), ("num_data_shreds_15", self.num_data_shreds_hist[1], i64), @@ -161,6 +168,7 @@ impl AddAssign for ProcessShredsStats { coalesce_elapsed, num_data_shreds_hist, num_extant_slots, + err_unknown_chained_merkle_root, data_buffer_residual, num_merkle_data_shreds, num_merkle_coding_shreds, @@ -175,6 +183,7 @@ impl AddAssign for ProcessShredsStats { self.get_leader_schedule_elapsed += get_leader_schedule_elapsed; self.coalesce_elapsed += coalesce_elapsed; self.num_extant_slots += num_extant_slots; + self.err_unknown_chained_merkle_root += err_unknown_chained_merkle_root; self.data_buffer_residual += data_buffer_residual; self.num_merkle_data_shreds += num_merkle_data_shreds; self.num_merkle_coding_shreds += num_merkle_coding_shreds; diff --git a/ledger/src/slot_stats.rs b/ledger/src/slot_stats.rs index 9033c3d1600f89..14e363960645a7 100644 --- a/ledger/src/slot_stats.rs +++ b/ledger/src/slot_stats.rs @@ -131,8 +131,8 @@ impl SlotsStats { .unwrap_or(-1); datapoint_info!( "shred_insert_is_full", - ("total_time_ms", total_time_ms, i64), ("slot", slot, i64), + ("total_time_ms", total_time_ms, i64), ("last_index", last_index, i64), ("num_repaired", num_repaired, i64), ("num_recovered", num_recovered, i64), diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 21606164cc27e4..537dd6495f32e1 100644 --- a/local-cluster/src/validator_configs.rs +++ 
b/local-cluster/src/validator_configs.rs @@ -13,7 +13,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { voting_disabled: config.voting_disabled, account_paths: config.account_paths.clone(), account_snapshot_paths: config.account_snapshot_paths.clone(), - account_shrink_paths: config.account_shrink_paths.clone(), rpc_config: config.rpc_config.clone(), on_start_geyser_plugin_config_files: config.on_start_geyser_plugin_config_files.clone(), rpc_addrs: config.rpc_addrs, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 02953c632a80c3..6f7de16df296b1 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2211,7 +2211,6 @@ fn create_snapshot_to_hard_fork( .unwrap() .0, ], - None, Some(&snapshot_config), process_options, None, @@ -5609,11 +5608,11 @@ fn test_invalid_forks_persisted_on_restart() { .entries_to_shreds( &majority_keypair, &entries, - true, // is_full_slot - None, // chained_merkle_root - 0, // next_shred_index, - 0, // next_code_index - false, // merkle_variant + true, // is_full_slot + None, // chained_merkle_root + 0, // next_shred_index, + 0, // next_code_index + true, // merkle_variant &ReedSolomonCache::default(), &mut ProcessShredsStats::default(), ) diff --git a/net/net.sh b/net/net.sh index fd25d429be3aa6..fe52116250545d 100755 --- a/net/net.sh +++ b/net/net.sh @@ -191,7 +191,7 @@ build() { if [[ $(uname) != Linux || ! 
" ${supported[*]} " =~ $(lsb_release -sr) ]]; then # shellcheck source=ci/rust-version.sh source "$SOLANA_ROOT"/ci/rust-version.sh - MAYBE_DOCKER="ci/docker-run.sh $rust_stable_docker_image" + MAYBE_DOCKER="ci/docker-run.sh ${ci_docker_image:?}" fi SECONDS=0 ( diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index a92da7bd001bbe..19f5f7486ea330 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -137,8 +137,6 @@ pub struct LoadedProgram { pub deployment_slot: Slot, /// Slot in which this entry will become active (can be in the future) pub effective_slot: Slot, - /// Optional expiration slot for this entry, after which it is treated as non-existent - pub maybe_expiration_slot: Option, /// How often this entry was used by a transaction pub tx_usage_counter: AtomicU64, /// How often this entry was used by an instruction @@ -150,22 +148,24 @@ pub struct LoadedProgram { /// Global cache statistics for [LoadedPrograms]. 
#[derive(Debug, Default)] pub struct Stats { - /// a program was requested + /// a program was already in the cache pub hits: AtomicU64, - /// a program was polled during cooperative loading + /// a program was not found and loaded instead pub misses: AtomicU64, /// a compiled executable was unloaded pub evictions: HashMap, - /// a program was loaded + /// an unloaded program was loaded again (opposite of eviction) + pub reloads: AtomicU64, + /// a program was loaded or un/re/deployed pub insertions: AtomicU64, - /// a program was reloaded or redeployed + /// a program was loaded but can not be extracted on its own fork anymore + pub lost_insertions: AtomicU64, + /// a program which was already in the cache was reloaded by mistake pub replacements: AtomicU64, /// a program was only used once before being unloaded pub one_hit_wonders: AtomicU64, /// a program became unreachable in the fork graph because of rerooting pub prunes_orphan: AtomicU64, - /// a program got pruned because its expiration slot passed - pub prunes_expired: AtomicU64, /// a program got pruned because it was not recompiled for the next epoch pub prunes_environment: AtomicU64, /// the [SecondLevel] was empty because all slot versions got pruned @@ -177,12 +177,13 @@ impl Stats { pub fn submit(&self, slot: Slot) { let hits = self.hits.load(Ordering::Relaxed); let misses = self.misses.load(Ordering::Relaxed); + let evictions: u64 = self.evictions.values().sum(); + let reloads = self.reloads.load(Ordering::Relaxed); let insertions = self.insertions.load(Ordering::Relaxed); + let lost_insertions = self.lost_insertions.load(Ordering::Relaxed); let replacements = self.replacements.load(Ordering::Relaxed); let one_hit_wonders = self.one_hit_wonders.load(Ordering::Relaxed); - let evictions: u64 = self.evictions.values().sum(); let prunes_orphan = self.prunes_orphan.load(Ordering::Relaxed); - let prunes_expired = self.prunes_expired.load(Ordering::Relaxed); let prunes_environment = 
self.prunes_environment.load(Ordering::Relaxed); let empty_entries = self.empty_entries.load(Ordering::Relaxed); datapoint_info!( @@ -191,17 +192,18 @@ impl Stats { ("hits", hits, i64), ("misses", misses, i64), ("evictions", evictions, i64), + ("reloads", reloads, i64), ("insertions", insertions, i64), + ("lost_insertions", lost_insertions, i64), ("replacements", replacements, i64), ("one_hit_wonders", one_hit_wonders, i64), ("prunes_orphan", prunes_orphan, i64), - ("prunes_expired", prunes_expired, i64), ("prunes_environment", prunes_environment, i64), ("empty_entries", empty_entries, i64), ); debug!( - "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Expired: {}, Prunes-Environment: {}, Empty: {}", - hits, misses, evictions, insertions, replacements, one_hit_wonders, prunes_orphan, prunes_expired, prunes_environment, empty_entries + "Loaded Programs Cache Stats -- Hits: {}, Misses: {}, Evictions: {}, Reloads: {}, Insertions: {} Lost-Insertions: {}, Replacements: {}, One-Hit-Wonders: {}, Prunes-Orphan: {}, Prunes-Environment: {}, Empty: {}", + hits, misses, evictions, reloads, insertions, lost_insertions, replacements, one_hit_wonders, prunes_orphan, prunes_environment, empty_entries ); if log_enabled!(log::Level::Trace) && !self.evictions.is_empty() { let mut evictions = self.evictions.iter().collect::>(); @@ -278,7 +280,6 @@ impl LoadedProgram { program_runtime_environment: ProgramRuntimeEnvironment, deployment_slot: Slot, effective_slot: Slot, - maybe_expiration_slot: Option, elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, @@ -288,7 +289,6 @@ impl LoadedProgram { program_runtime_environment, deployment_slot, effective_slot, - maybe_expiration_slot, elf_bytes, account_size, metrics, @@ -309,7 +309,6 @@ impl LoadedProgram { program_runtime_environment: Arc>>, deployment_slot: Slot, effective_slot: Slot, - maybe_expiration_slot: Option, 
elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, @@ -319,7 +318,6 @@ impl LoadedProgram { program_runtime_environment, deployment_slot, effective_slot, - maybe_expiration_slot, elf_bytes, account_size, metrics, @@ -332,7 +330,6 @@ impl LoadedProgram { program_runtime_environment: Arc>>, deployment_slot: Slot, effective_slot: Slot, - maybe_expiration_slot: Option, elf_bytes: &[u8], account_size: usize, metrics: &mut LoadProgramMetrics, @@ -377,7 +374,6 @@ impl LoadedProgram { deployment_slot, account_size, effective_slot, - maybe_expiration_slot, tx_usage_counter: AtomicU64::new(0), program, ix_usage_counter: AtomicU64::new(0), @@ -391,7 +387,6 @@ impl LoadedProgram { account_size: self.account_size, deployment_slot: self.deployment_slot, effective_slot: self.effective_slot, - maybe_expiration_slot: self.maybe_expiration_slot, tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)), latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)), @@ -412,7 +407,6 @@ impl LoadedProgram { deployment_slot, account_size, effective_slot: deployment_slot, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(0), program: LoadedProgramType::Builtin(BuiltinProgram::new_builtin(function_registry)), ix_usage_counter: AtomicU64::new(0), @@ -421,14 +415,11 @@ impl LoadedProgram { } pub fn new_tombstone(slot: Slot, reason: LoadedProgramType) -> Self { - let maybe_expiration_slot = matches!(reason, LoadedProgramType::DelayVisibility) - .then_some(slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET)); let tombstone = Self { program: reason, account_size: 0, deployment_slot: slot, effective_slot: slot, - maybe_expiration_slot, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::new(0), @@ -460,7 +451,8 @@ impl LoadedProgram { pub fn decayed_usage_counter(&self, now: 
Slot) -> u64 { let last_access = self.latest_access_slot.load(Ordering::Relaxed); - let decaying_for = now.saturating_sub(last_access); + // Shifting the u64 value for more than 63 will cause an overflow. + let decaying_for = std::cmp::min(63, now.saturating_sub(last_access)); self.tx_usage_counter.load(Ordering::Relaxed) >> decaying_for } } @@ -716,9 +708,7 @@ impl LoadedPrograms { let index = slot_versions .iter() .position(|at| at.effective_slot >= entry.effective_slot); - if let Some((existing, entry_index)) = - index.and_then(|index| slot_versions.get(index).map(|value| (value, index))) - { + if let Some(existing) = index.and_then(|index| slot_versions.get_mut(index)) { if existing.deployment_slot == entry.deployment_slot && existing.effective_slot == entry.effective_slot { @@ -733,17 +723,19 @@ impl LoadedPrograms { existing.ix_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); - slot_versions.remove(entry_index); + self.stats.reloads.fetch_add(1, Ordering::Relaxed); } else if existing.is_tombstone() != entry.is_tombstone() { // Either the old entry is tombstone and the new one is not. // (Let's give the new entry a chance). // Or, the old entry is not a tombstone and the new one is a tombstone. // (Remove the old entry, as the tombstone makes it obsolete). 
- slot_versions.remove(entry_index); + self.stats.insertions.fetch_add(1, Ordering::Relaxed); } else { self.stats.replacements.fetch_add(1, Ordering::Relaxed); return (true, existing.clone()); } + *existing = entry.clone(); + return (false, entry); } } self.stats.insertions.fetch_add(1, Ordering::Relaxed); @@ -830,13 +822,6 @@ impl LoadedPrograms { } }) .filter(|entry| { - // Remove expired - if let Some(expiration) = entry.maybe_expiration_slot { - if expiration <= new_root_slot { - self.stats.prunes_expired.fetch_add(1, Ordering::Relaxed); - return false; - } - } // Remove outdated environment of previous feature set if recompilation_phase_ends && !Self::matches_environment(entry, &self.environments) @@ -881,31 +866,13 @@ impl LoadedPrograms { } } - fn is_entry_usable( - entry: &Arc, - current_slot: Slot, - match_criteria: &LoadedProgramMatchCriteria, - ) -> bool { - if entry - .maybe_expiration_slot - .map(|expiration_slot| expiration_slot <= current_slot) - .unwrap_or(false) - { - // Found an entry that's already expired. Any further entries in the list - // are older than the current one. So treat the program as missing in the - // cache and return early. - return false; - } - - Self::matches_loaded_program_criteria(entry, match_criteria) - } - /// Extracts a subset of the programs relevant to a transaction batch /// and returns which program accounts the accounts DB needs to load. 
pub fn extract( &mut self, search_for: &mut Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))>, loaded_programs_for_tx_batch: &mut LoadedProgramsForTxBatch, + is_first_round: bool, ) -> Option<(Pubkey, u64)> { debug_assert!(self.fork_graph.is_some()); let locked_fork_graph = self.fork_graph.as_ref().unwrap().read().unwrap(); @@ -913,15 +880,14 @@ impl LoadedPrograms { search_for.retain(|(key, (match_criteria, usage_count))| { if let Some(second_level) = self.entries.get_mut(key) { for entry in second_level.slot_versions.iter().rev() { - let is_ancestor = matches!( - locked_fork_graph - .relationship(entry.deployment_slot, loaded_programs_for_tx_batch.slot), - BlockRelation::Ancestor - ); - if entry.deployment_slot <= self.latest_root_slot - || entry.deployment_slot == loaded_programs_for_tx_batch.slot - || is_ancestor + || matches!( + locked_fork_graph.relationship( + entry.deployment_slot, + loaded_programs_for_tx_batch.slot + ), + BlockRelation::Equal | BlockRelation::Ancestor + ) { let entry_to_return = if loaded_programs_for_tx_batch.slot >= entry.effective_slot @@ -929,14 +895,9 @@ impl LoadedPrograms { entry, &loaded_programs_for_tx_batch.environments, ) { - if !Self::is_entry_usable( - entry, - loaded_programs_for_tx_batch.slot, - match_criteria, - ) { + if !Self::matches_loaded_program_criteria(entry, match_criteria) { break; } - if let LoadedProgramType::Unloaded(_environment) = &entry.program { break; } @@ -980,13 +941,15 @@ impl LoadedPrograms { true }); drop(locked_fork_graph); - self.stats - .misses - .fetch_add(search_for.len() as u64, Ordering::Relaxed); - self.stats.hits.fetch_add( - loaded_programs_for_tx_batch.entries.len() as u64, - Ordering::Relaxed, - ); + if is_first_round { + self.stats + .misses + .fetch_add(search_for.len() as u64, Ordering::Relaxed); + self.stats.hits.fetch_add( + loaded_programs_for_tx_batch.entries.len() as u64, + Ordering::Relaxed, + ); + } cooperative_loading_task } @@ -1003,6 +966,20 @@ impl LoadedPrograms { Some((slot, 
std::thread::current().id())) ); second_level.cooperative_loading_lock = None; + // Check that it will be visible to our own fork once inserted + if loaded_program.deployment_slot > self.latest_root_slot + && !matches!( + self.fork_graph + .as_ref() + .unwrap() + .read() + .unwrap() + .relationship(loaded_program.deployment_slot, slot), + BlockRelation::Equal | BlockRelation::Ancestor + ) + { + self.stats.lost_insertions.fetch_add(1, Ordering::Relaxed); + } self.assign_program(key, loaded_program); self.loading_task_waiter.notify(); } @@ -1226,27 +1203,12 @@ mod tests { deployment_slot: Slot, effective_slot: Slot, usage_counter: AtomicU64, - ) -> Arc { - new_test_loaded_program_with_usage_and_expiry( - deployment_slot, - effective_slot, - usage_counter, - None, - ) - } - - fn new_test_loaded_program_with_usage_and_expiry( - deployment_slot: Slot, - effective_slot: Slot, - usage_counter: AtomicU64, - expiry: Option, ) -> Arc { Arc::new(LoadedProgram { program: LoadedProgramType::TestLoaded(MOCK_ENVIRONMENT.get().unwrap().clone()), account_size: 0, deployment_slot, effective_slot, - maybe_expiration_slot: expiry, tx_usage_counter: usage_counter, ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::new(deployment_slot), @@ -1259,7 +1221,6 @@ mod tests { account_size: 0, deployment_slot, effective_slot, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), @@ -1288,7 +1249,6 @@ mod tests { account_size: 0, deployment_slot: slot, effective_slot: slot.saturating_add(1), - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), @@ -1340,6 +1300,10 @@ mod tests { assert_eq!(program.decayed_usage_counter(19), 16); assert_eq!(program.decayed_usage_counter(20), 8); assert_eq!(program.decayed_usage_counter(21), 4); + + // Decay for 63 or more slots + 
assert_eq!(program.decayed_usage_counter(18 + 63), 0); + assert_eq!(program.decayed_usage_counter(100), 0); } #[test] @@ -1881,7 +1845,6 @@ mod tests { account_size: 0, deployment_slot: 20, effective_slot: 20, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), latest_access_slot: AtomicU64::default(), @@ -2080,7 +2043,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), ]; let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 20, 22)); assert!(match_slot(&extracted, &program4, 0, 22)); @@ -2096,7 +2059,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 15)); assert!(match_slot(&extracted, &program2, 11, 15)); @@ -2119,7 +2082,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(18, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 18)); assert!(match_slot(&extracted, &program2, 11, 18)); @@ -2137,7 +2100,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); @@ -2155,7 +2118,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = 
LoadedProgramsForTxBatch::new(11, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 11)); // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone. @@ -2168,58 +2131,6 @@ mod tests { assert!(match_missing(&missing, &program3, false)); - // The following is a special case, where there's an expiration slot - let test_program = Arc::new(LoadedProgram { - program: LoadedProgramType::DelayVisibility, - account_size: 0, - deployment_slot: 19, - effective_slot: 19, - maybe_expiration_slot: Some(21), - tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), - latest_access_slot: AtomicU64::default(), - }); - assert!(!cache.replenish(program4, test_program).0); - - // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); - - assert!(match_slot(&extracted, &program1, 0, 19)); - assert!(match_slot(&extracted, &program2, 11, 19)); - // Program4 deployed at slot 19 should not be expired yet - assert!(match_slot(&extracted, &program4, 19, 19)); - - assert!(match_missing(&missing, &program3, false)); - - // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 - // This would cause program4 deployed at slot 19 to be expired. 
- let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); - - assert!(match_slot(&extracted, &program1, 0, 21)); - assert!(match_slot(&extracted, &program2, 11, 21)); - - assert!(match_missing(&missing, &program3, false)); - assert!(match_missing(&missing, &program4, false)); - - // Remove the expired entry to let the rest of the test continue - if let Some(second_level) = cache.entries.get_mut(&program4) { - second_level.slot_versions.pop(); - } - cache.prune(5, 0); // Fork graph after pruning @@ -2245,7 +2156,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); // Since the fork was pruned, we should not find the entry deployed at slot 20. 
assert!(match_slot(&extracted, &program1, 0, 21)); @@ -2262,7 +2173,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); @@ -2294,7 +2205,7 @@ mod tests { (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); @@ -2349,7 +2260,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 12)); assert!(match_slot(&extracted, &program2, 11, 12)); @@ -2369,7 +2280,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program2, 11, 12)); @@ -2439,7 +2350,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 19)); assert!(match_slot(&extracted, &program2, 11, 19)); @@ -2453,7 +2364,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let 
mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); @@ -2467,7 +2378,7 @@ mod tests { (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 20, 22)); @@ -2475,117 +2386,6 @@ mod tests { assert!(match_missing(&missing, &program3, true)); } - #[test] - fn test_prune_expired() { - let mut cache = new_mock_cache::(); - - // Fork graph created for the test - // 0 - // / \ - // 10 5 - // | | - // 20 11 - // | | \ - // 22 15 25 - // | | - // 16 27 - // | - // 19 - // | - // 23 - - let mut fork_graph = TestForkGraphSpecific::default(); - fork_graph.insert_fork(&[0, 10, 20, 22]); - fork_graph.insert_fork(&[0, 5, 11, 12, 15, 16, 18, 19, 21, 23]); - fork_graph.insert_fork(&[0, 5, 11, 25, 27]); - let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); - - let program1 = Pubkey::new_unique(); - assert!(!cache.replenish(program1, new_test_loaded_program(10, 11)).0); - assert!(!cache.replenish(program1, new_test_loaded_program(20, 21)).0); - - let program2 = Pubkey::new_unique(); - assert!(!cache.replenish(program2, new_test_loaded_program(5, 6)).0); - assert!(!cache.replenish(program2, new_test_loaded_program(11, 12)).0); - - let program3 = Pubkey::new_unique(); - assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); - - // The following is a special case, where there's an expiration slot - let test_program = Arc::new(LoadedProgram { - program: LoadedProgramType::DelayVisibility, - account_size: 0, - deployment_slot: 11, - effective_slot: 11, - maybe_expiration_slot: Some(15), 
- tx_usage_counter: AtomicU64::default(), - ix_usage_counter: AtomicU64::default(), - latest_access_slot: AtomicU64::default(), - }); - assert!(!cache.replenish(program1, test_program).0); - - // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); - - // Program1 deployed at slot 11 should not be expired yet - assert!(match_slot(&extracted, &program1, 11, 12)); - assert!(match_slot(&extracted, &program2, 11, 12)); - - assert!(match_missing(&missing, &program3, false)); - - // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15 - // This would cause program4 deployed at slot 15 to be expired. - let mut missing = vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ]; - let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); - - assert!(match_slot(&extracted, &program2, 11, 15)); - - assert!(match_missing(&missing, &program1, false)); - assert!(match_missing(&missing, &program3, false)); - - // Test that the program still exists in the cache, even though it is expired. 
- assert_eq!( - cache - .entries - .get(&program1) - .expect("Didn't find program1") - .slot_versions - .len(), - 3 - ); - - // New root 5 should not evict the expired entry for program1 - cache.prune(5, 0); - assert_eq!( - cache - .entries - .get(&program1) - .expect("Didn't find program1") - .slot_versions - .len(), - 1 - ); - - // Unlock the cooperative loading lock so that the subsequent prune can do its job - cache.finish_cooperative_loading_task(15, program1, new_test_loaded_program(0, 1)); - - // New root 15 should evict the expired entry for program1 - cache.prune(15, 0); - assert!(cache.entries.get(&program1).is_none()); - } - #[test] fn test_fork_prune_find_first_ancestor() { let mut cache = new_mock_cache::(); @@ -2614,7 +2414,7 @@ mod tests { let mut missing = vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); // The cache should have the program deployed at slot 0 assert_eq!( @@ -2658,7 +2458,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); @@ -2668,7 +2468,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 5, 6)); assert!(match_missing(&missing, &program2, false)); @@ -2682,7 +2482,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = 
LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); @@ -2692,7 +2492,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 6)); assert!(match_missing(&missing, &program2, false)); @@ -2706,7 +2506,7 @@ mod tests { (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), ]; let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); - cache.extract(&mut missing, &mut extracted); + cache.extract(&mut missing, &mut extracted, true); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_missing(&missing, &program2, false)); @@ -2717,109 +2517,96 @@ mod tests { new_mock_cache::(); let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::Tombstone - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::NoCriteria + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::NoCriteria - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::Tombstone + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + 
&LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &tombstone, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &tombstone, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) + ) + ); let program = new_test_loaded_program(0, 1); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::Tombstone - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::NoCriteria + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::NoCriteria - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::Tombstone + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) + ) + ); - let program = Arc::new(new_test_loaded_program_with_usage_and_expiry( + let program = Arc::new(new_test_loaded_program_with_usage( 0, 1, AtomicU64::default(), - Some(2), - )); - - assert!(LoadedPrograms::::is_entry_usable( - &program, - 0, - &LoadedProgramMatchCriteria::NoCriteria - )); - - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::NoCriteria )); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - 
&LoadedProgramMatchCriteria::Tombstone - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::NoCriteria + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 2, - &LoadedProgramMatchCriteria::NoCriteria - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::Tombstone + ) + ); - assert!(LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) - )); + assert!( + LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) + ) + ); - assert!(!LoadedPrograms::::is_entry_usable( - &program, - 1, - &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) - )); + assert!( + !LoadedPrograms::::matches_loaded_program_criteria( + &program, + &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) + ) + ); } } diff --git a/program-runtime/src/prioritization_fee.rs b/program-runtime/src/prioritization_fee.rs index e77ae15aac7f21..398b8d310be854 100644 --- a/program-runtime/src/prioritization_fee.rs +++ b/program-runtime/src/prioritization_fee.rs @@ -10,15 +10,15 @@ pub enum PrioritizationFeeType { #[derive(Default, Debug, PartialEq, Eq)] pub struct PrioritizationFeeDetails { fee: u64, - priority: u64, + compute_unit_price: u64, } impl PrioritizationFeeDetails { pub fn new(fee_type: PrioritizationFeeType, compute_unit_limit: u64) -> Self { match fee_type { - PrioritizationFeeType::ComputeUnitPrice(cu_price) => { + PrioritizationFeeType::ComputeUnitPrice(compute_unit_price) => { let micro_lamport_fee: MicroLamports = - (cu_price as u128).saturating_mul(compute_unit_limit as u128); + (compute_unit_price as u128).saturating_mul(compute_unit_limit as u128); let fee = micro_lamport_fee .saturating_add(MICRO_LAMPORTS_PER_LAMPORT.saturating_sub(1) as u128) .checked_div(MICRO_LAMPORTS_PER_LAMPORT as u128) @@ -27,7 +27,7 @@ impl PrioritizationFeeDetails 
{ Self { fee, - priority: cu_price, + compute_unit_price, } } } @@ -37,8 +37,8 @@ impl PrioritizationFeeDetails { self.fee } - pub fn get_priority(&self) -> u64 { - self.priority + pub fn get_compute_unit_price(&self) -> u64 { + self.compute_unit_price } } @@ -62,7 +62,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(MICRO_LAMPORTS_PER_LAMPORT - 1), 1), FeeDetails { fee: 1, - priority: MICRO_LAMPORTS_PER_LAMPORT - 1, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT - 1, }, "should round up (<1.0) lamport fee to 1 lamport" ); @@ -71,7 +71,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(MICRO_LAMPORTS_PER_LAMPORT), 1), FeeDetails { fee: 1, - priority: MICRO_LAMPORTS_PER_LAMPORT, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT, }, ); @@ -79,7 +79,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(MICRO_LAMPORTS_PER_LAMPORT + 1), 1), FeeDetails { fee: 2, - priority: MICRO_LAMPORTS_PER_LAMPORT + 1, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT + 1, }, "should round up (>1.0) lamport fee to 2 lamports" ); @@ -88,7 +88,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(200), 100_000), FeeDetails { fee: 20, - priority: 200, + compute_unit_price: 200, }, ); @@ -99,7 +99,7 @@ mod test { ), FeeDetails { fee: u64::MAX, - priority: MICRO_LAMPORTS_PER_LAMPORT, + compute_unit_price: MICRO_LAMPORTS_PER_LAMPORT, }, ); @@ -107,7 +107,7 @@ mod test { FeeDetails::new(FeeType::ComputeUnitPrice(u64::MAX), u64::MAX), FeeDetails { fee: u64::MAX, - priority: u64::MAX, + compute_unit_price: u64::MAX, }, ); } diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index c4ab4507b27eae..b8b4fcdb332a09 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -26,6 +26,7 @@ solana-logger = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-svm = { workspace = true } solana-vote-program = { workspace = true } solana_rbpf = { workspace = true } 
test-case = { workspace = true } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 32dbb276ee2c7a..20b9f5806e29c3 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -25,7 +25,6 @@ use { bank_forks::BankForks, commitment::BlockCommitmentCache, genesis_utils::{create_genesis_config_with_leader_ex, GenesisConfigInfo}, - runtime_config::RuntimeConfig, }, solana_sdk::{ account::{create_account_shared_data_for_test, Account, AccountSharedData}, @@ -46,6 +45,7 @@ use { stable_layout::stable_instruction::StableInstruction, sysvar::{Sysvar, SysvarId}, }, + solana_svm::runtime_config::RuntimeConfig, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{ cell::RefCell, diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 48d44b7187a658..21a7b5fed77257 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -81,7 +81,6 @@ pub fn load_program_from_bytes( program_runtime_environment, deployment_slot, effective_slot, - None, programdata, account_size, load_program_metrics, @@ -93,7 +92,6 @@ pub fn load_program_from_bytes( program_runtime_environment, deployment_slot, effective_slot, - None, programdata, account_size, load_program_metrics, @@ -4004,7 +4002,6 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(100), ix_usage_counter: AtomicU64::new(100), latest_access_slot: AtomicU64::new(0), @@ -4045,7 +4042,6 @@ mod tests { account_size: 0, deployment_slot: 0, effective_slot: 0, - maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(100), ix_usage_counter: AtomicU64::new(100), latest_access_slot: AtomicU64::new(0), diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 20b413d23e7416..4764b23fe65e50 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -419,7 +419,6 @@ pub fn process_instruction_deploy( .clone(), 
deployment_slot, effective_slot, - None, programdata, buffer.get_data().len(), &mut load_program_metrics, @@ -660,7 +659,6 @@ mod tests { .clone(), 0, 0, - None, programdata, account.data().len(), &mut load_program_metrics, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index a742b404df514a..14c7f285b4c465 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -834,9 +834,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.14.1" +version = "1.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed2490600f404f2b94c167e31d3ed1d5f3c225a0f3b80230053b3e0b7b962bd9" +checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" dependencies = [ "bytemuck_derive", ] @@ -1617,9 +1617,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fd-lock" @@ -1936,7 +1936,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.1.0", + "indexmap 2.2.2", "slab", "tokio", "tokio-util 0.7.1", @@ -2287,9 +2287,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" dependencies = [ "equivalent", "hashbrown 0.14.1", @@ -2350,9 +2350,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.67" +version = "0.3.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a1d36f1235bc969acba30b7f5990b864423a6068a10f7c90ae8f0112e3a59d1" +checksum = 
"406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" dependencies = [ "wasm-bindgen", ] @@ -2518,9 +2518,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.152" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" @@ -3010,9 +3010,9 @@ dependencies = [ [[package]] name = "num-derive" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", @@ -3054,9 +3054,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -3900,15 +3900,6 @@ dependencies = [ "bitflags 1.3.2", ] -[[package]] -name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_users" version = "0.4.0" @@ -4109,9 +4100,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" dependencies = [ "bitflags 2.4.2", "errno", @@ 
-4353,11 +4344,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.30" +version = "0.9.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1bf28c79a99f70ee1f1d83d10c875d2e70618417fda01ad1785e027579d9d38" +checksum = "adf8a49373e98a4c5f0ceb5d05aa7c648d75f63774981ed95b7c7443bbd50c6e" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.2", "itoa", "ryu", "serde", @@ -4628,14 +4619,14 @@ dependencies = [ "fnv", "im", "index_list", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "lazy_static", "log", "lz4", "memmap2", "modular-bitfield", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_cpus", "num_enum 0.7.2", @@ -4678,7 +4669,7 @@ dependencies = [ "bincode", "bytemuck", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rustc_version", "serde", @@ -4855,7 +4846,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "quinn", @@ -4905,7 +4896,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "log", "rand 0.8.5", "rayon", @@ -4972,6 +4963,7 @@ dependencies = [ "solana-sdk", "solana-send-transaction-service", "solana-streamer", + "solana-svm", "solana-tpu-client", "solana-transaction-status", "solana-turbine", @@ -5155,7 +5147,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "log", "lru", @@ -5243,6 +5235,7 @@ dependencies = [ "solana-stake-program", "solana-storage-bigtable", "solana-storage-proto", + "solana-svm", "solana-transaction-status", "solana-vote", "solana-vote-program", @@ -5408,7 +5401,7 @@ dependencies = [ "log", "memoffset 0.9.0", "num-bigint 0.4.4", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "parking_lot 0.12.1", "rand 0.8.5", @@ -5440,7 +5433,7 @@ dependencies = [ "itertools", "libc", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", 
"percentage", "rand 0.8.5", @@ -5476,6 +5469,7 @@ dependencies = [ "solana-program-runtime", "solana-runtime", "solana-sdk", + "solana-svm", "solana-vote-program", "solana_rbpf", "test-case", @@ -5545,7 +5539,7 @@ dependencies = [ "console", "dialoguer", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "parking_lot 0.12.1", "qstring", @@ -5693,7 +5687,7 @@ dependencies = [ "memmap2", "mockall", "modular-bitfield", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_cpus", "num_enum 0.7.2", @@ -5724,6 +5718,7 @@ dependencies = [ "solana-rayon-threadlimit", "solana-sdk", "solana-stake-program", + "solana-svm", "solana-system-program", "solana-version", "solana-vote", @@ -6187,7 +6182,7 @@ dependencies = [ "libsecp256k1 0.6.0", "log", "memmap2", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_enum 0.7.2", "pbkdf2 0.11.0", @@ -6315,7 +6310,7 @@ dependencies = [ "crossbeam-channel", "futures-util", "histogram", - "indexmap 2.1.0", + "indexmap 2.2.2", "itertools", "libc", "log", @@ -6334,6 +6329,26 @@ dependencies = [ "x509-parser", ] +[[package]] +name = "solana-svm" +version = "1.18.0" +dependencies = [ + "itertools", + "log", + "percentage", + "rustc_version", + "solana-accounts-db", + "solana-bpf-loader-program", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "solana-loader-v4-program", + "solana-measure", + "solana-metrics", + "solana-program-runtime", + "solana-sdk", + "solana-system-program", +] + [[package]] name = "solana-system-program" version = "1.18.0" @@ -6372,6 +6387,7 @@ dependencies = [ "solana-runtime", "solana-sdk", "solana-streamer", + "solana-svm", "solana-tpu-client", "tokio", ] @@ -6396,7 +6412,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.1.0", + "indexmap 2.2.2", "indicatif", "log", "rayon", @@ -6557,6 +6573,7 @@ dependencies = [ "solana-send-transaction-service", "solana-storage-bigtable", "solana-streamer", + "solana-svm", "solana-test-validator", 
"solana-tpu-client", "solana-version", @@ -6603,7 +6620,7 @@ version = "1.18.0" dependencies = [ "bincode", "log", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rustc_version", "serde", @@ -6642,7 +6659,7 @@ name = "solana-zk-token-proof-program" version = "1.18.0" dependencies = [ "bytemuck", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program-runtime", "solana-sdk", @@ -6663,7 +6680,7 @@ dependencies = [ "itertools", "lazy_static", "merlin", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "rand 0.7.3", "serde", @@ -6715,7 +6732,7 @@ checksum = "992d9c64c2564cc8f63a4b508bf3ebcdf2254b0429b13cd1d31adb6162432a5f" dependencies = [ "assert_matches", "borsh 0.10.3", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program", "spl-token", @@ -6786,7 +6803,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "249e0318493b6bcf27ae9902600566c689b7dfba9f1bdff5893e92253374e78c" dependencies = [ - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "solana-program", "spl-program-error-derive", @@ -6842,7 +6859,7 @@ checksum = "d697fac19fd74ff472dfcc13f0b442dd71403178ce1de7b5d16f83a33561c059" dependencies = [ "arrayref", "bytemuck", - "num-derive 0.4.1", + "num-derive 0.4.2", "num-traits", "num_enum 0.7.2", "solana-program", @@ -7121,13 +7138,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall 0.4.1", "rustix", "windows-sys 0.52.0", ] @@ -7453,7 +7469,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 
2.1.0", + "indexmap 2.2.2", "toml_datetime", "winnow", ] @@ -7818,9 +7834,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1223296a201415c7fad14792dbefaace9bd52b62d33453ade1c5b5f07555406" +checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -7828,9 +7844,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdc935b63408d58a32f8cc9738a0bffd8f05cc7c002086c6ef20b7312ad9dcd" +checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" dependencies = [ "bumpalo", "log", @@ -7855,9 +7871,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c238561b2d428924c49815533a8b9121c664599558a5d9ec51f8a1740a999" +checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7865,9 +7881,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae1abb6806dc1ad9e560ed242107c0f6c84335f1749dd4e8ddb012ebd5e25a7" +checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", @@ -7878,9 +7894,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.90" +version = "0.2.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d91413b1c31d7539ba5ef2451af3f0b833a005eb27a631cec32bc0635a8602b" +checksum = 
"4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" [[package]] name = "web-sys" diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 16d78a913bc90b..5cc5b82344e0d1 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -7,7 +7,7 @@ use { base64::{prelude::BASE64_STANDARD, Engine}, bincode::{config::Options, serialize}, crossbeam_channel::{unbounded, Receiver, Sender}, - jsonrpc_core::{futures::future, types::error, BoxFuture, Error, ErrorCode, Metadata, Result}, + jsonrpc_core::{futures::future, types::error, BoxFuture, Error, Metadata, Result}, jsonrpc_derive::rpc, solana_account_decoder::{ parse_token::{is_known_spl_token_id, token_amount_to_ui_amount, UiTokenAmount}, @@ -62,10 +62,6 @@ use { clock::{Slot, UnixTimestamp, MAX_RECENT_BLOCKHASHES}, commitment_config::{CommitmentConfig, CommitmentLevel}, epoch_info::EpochInfo, - epoch_rewards_hasher::EpochRewardsHasher, - epoch_rewards_partition_data::{ - get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, - }, epoch_schedule::EpochSchedule, exit::Exit, feature_set, @@ -523,38 +519,6 @@ impl JsonRpcRequestProcessor { }) } - async fn get_reward_map( - &self, - slot: Slot, - addresses: &[String], - reward_type_filter: &F, - config: &RpcEpochConfig, - ) -> Result> - where - F: Fn(RewardType) -> bool, - { - let Ok(Some(block)) = self - .get_block( - slot, - Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), - ) - .await - else { - return Err(RpcCustomError::BlockNotAvailable { slot }.into()); - }; - - Ok(block - .rewards - .unwrap_or_default() - .into_iter() - .filter(|reward| { - reward.reward_type.is_some_and(reward_type_filter) - && addresses.contains(&reward.pubkey) - }) - .map(|reward| (reward.clone().pubkey, (reward, slot))) - .collect()) - } - pub async fn get_inflation_reward( &self, addresses: Vec, @@ -563,20 +527,18 @@ impl JsonRpcRequestProcessor { let config = config.unwrap_or_default(); let epoch_schedule = self.get_epoch_schedule(); let 
first_available_block = self.get_first_available_block().await; - let slot_context = RpcContextConfig { - commitment: config.commitment, - min_context_slot: config.min_context_slot, - }; let epoch = match config.epoch { Some(epoch) => epoch, None => epoch_schedule - .get_epoch(self.get_slot(slot_context)?) + .get_epoch(self.get_slot(RpcContextConfig { + commitment: config.commitment, + min_context_slot: config.min_context_slot, + })?) .saturating_sub(1), }; - // Rewards for this epoch are found in the first confirmed block of the next epoch - let rewards_epoch = epoch.saturating_add(1); - let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(rewards_epoch); + // Rewards for this epoch are found in the first confirmed block of the next epoch + let first_slot_in_epoch = epoch_schedule.get_first_slot_in_epoch(epoch.saturating_add(1)); if first_slot_in_epoch < first_available_block { if self.bigtable_ledger_storage.is_some() { return Err(RpcCustomError::LongTermStorageSlotSkipped { @@ -592,8 +554,6 @@ impl JsonRpcRequestProcessor { } } - let bank = self.get_bank_with_config(slot_context)?; - let first_confirmed_block_in_epoch = *self .get_blocks_with_limit(first_slot_in_epoch, 1, config.commitment) .await? 
@@ -601,94 +561,44 @@ impl JsonRpcRequestProcessor { .ok_or(RpcCustomError::BlockNotAvailable { slot: first_slot_in_epoch, })?; - let partitioned_epoch_reward_enabled_slot = bank - .feature_set - .activated_slot(&feature_set::enable_partitioned_epoch_reward::id()); - let partitioned_epoch_reward_enabled = partitioned_epoch_reward_enabled_slot - .map(|slot| slot <= first_confirmed_block_in_epoch) - .unwrap_or(false); - - let mut reward_map: HashMap = { - let addresses: Vec = - addresses.iter().map(|pubkey| pubkey.to_string()).collect(); - self.get_reward_map( + let Ok(Some(first_confirmed_block)) = self + .get_block( first_confirmed_block_in_epoch, - &addresses, - &|reward_type| -> bool { - reward_type == RewardType::Voting - || (!partitioned_epoch_reward_enabled && reward_type == RewardType::Staking) - }, - &config, + Some(RpcBlockConfig::rewards_with_commitment(config.commitment).into()), ) - .await? + .await + else { + return Err(RpcCustomError::BlockNotAvailable { + slot: first_confirmed_block_in_epoch, + } + .into()); }; - if partitioned_epoch_reward_enabled { - let partition_data_address = get_epoch_rewards_partition_data_address(rewards_epoch); - let partition_data_account = - bank.get_account(&partition_data_address) - .ok_or_else(|| Error { - code: ErrorCode::InternalError, - message: format!( - "Partition data account not found for epoch {:?} at {:?}", - epoch, partition_data_address - ), - data: None, - })?; - let EpochRewardsPartitionDataVersion::V0(partition_data) = - bincode::deserialize(partition_data_account.data()) - .map_err(|_| Error::internal_error())?; - let hasher = EpochRewardsHasher::new( - partition_data.num_partitions, - &partition_data.parent_blockhash, - ); - let mut partition_index_addresses: HashMap> = HashMap::new(); - for address in addresses.iter() { - let address_string = address.to_string(); - // Skip this address if (Voting) rewards were already found in - // the first block of the epoch - if 
!reward_map.contains_key(&address_string) { - let partition_index = hasher.clone().hash_address_to_partition(address); - partition_index_addresses - .entry(partition_index) - .and_modify(|list| list.push(address_string.clone())) - .or_insert(vec![address_string]); - } - } + let addresses: Vec = addresses + .into_iter() + .map(|pubkey| pubkey.to_string()) + .collect(); - let block_list = self - .get_blocks_with_limit( - first_confirmed_block_in_epoch + 1, - partition_data.num_partitions, - config.commitment, - ) - .await?; - - for (partition_index, addresses) in partition_index_addresses.iter() { - let slot = *block_list - .get(*partition_index) - .ok_or_else(Error::internal_error)?; - - let index_reward_map = self - .get_reward_map( - slot, - addresses, - &|reward_type| -> bool { reward_type == RewardType::Staking }, - &config, - ) - .await?; - reward_map.extend(index_reward_map); - } - } + let reward_hash: HashMap = first_confirmed_block + .rewards + .unwrap_or_default() + .into_iter() + .filter_map(|reward| match reward.reward_type? 
{ + RewardType::Staking | RewardType::Voting => addresses + .contains(&reward.pubkey) + .then(|| (reward.clone().pubkey, reward)), + _ => None, + }) + .collect(); let rewards = addresses .iter() .map(|address| { - if let Some((reward, slot)) = reward_map.get(&address.to_string()) { + if let Some(reward) = reward_hash.get(address) { return Some(RpcInflationReward { epoch, - effective_slot: *slot, + effective_slot: first_confirmed_block_in_epoch, amount: reward.lamports.unsigned_abs(), post_balance: reward.post_balance, commission: reward.commission, @@ -697,6 +607,7 @@ impl JsonRpcRequestProcessor { None }) .collect(); + Ok(rewards) } diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 68640362b2182c..82c7d48f01f21e 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -212,10 +212,6 @@ pub(crate) mod tests { crossbeam_channel::unbounded, dashmap::DashMap, solana_account_decoder::parse_token::token_amount_to_ui_amount, - solana_accounts_db::{ - nonce_info::{NonceFull, NoncePartial}, - rent_debits::RentDebits, - }, solana_ledger::{genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete}, solana_runtime::bank::{Bank, TransactionBalancesSet}, solana_sdk::{ @@ -226,7 +222,9 @@ pub(crate) mod tests { message::{LegacyMessage, Message, MessageHeader, SanitizedMessage}, nonce::{self, state::DurableNonce}, nonce_account, + nonce_info::{NonceFull, NoncePartial}, pubkey::Pubkey, + rent_debits::RentDebits, signature::{Keypair, Signature, Signer}, system_transaction, transaction::{ diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 355c858597895f..b14ffab2076ca3 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -64,6 +64,7 @@ solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } +solana-svm = { workspace = true } solana-system-program = { 
workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 7efc0a11ac0d75..fb81ce4716553e 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -16,7 +16,6 @@ use { accounts_index::{AccountSecondaryIndexes, ScanConfig}, ancestors::Ancestors, epoch_accounts_hash::EpochAccountsHash, - rent_collector::RentCollector, }, solana_runtime::bank::*, solana_sdk::{ @@ -25,6 +24,7 @@ use { hash::Hash, lamports::LamportsError, pubkey::Pubkey, + rent_collector::RentCollector, sysvar::epoch_schedule::EpochSchedule, }, std::{ diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6f5cd9a07f607b..f2722983dcdbdf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -48,7 +48,6 @@ use { epoch_rewards_hasher::hash_rewards_into_partitions, epoch_stakes::{EpochStakes, NodeVoteAccounts}, installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, - runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, snapshot_hash::SnapshotHash, stake_account::StakeAccount, @@ -59,17 +58,12 @@ use { }, stakes::{InvalidCacheEntryReason, Stakes, StakesCache, StakesEnum}, status_cache::{SlotDelta, StatusCache}, - svm::{ - account_loader::load_accounts, - transaction_account_state_info::TransactionAccountStateInfo, - }, transaction_batch::TransactionBatch, }, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, itertools::izip, log::*, - percentage::Percentage, rayon::{ iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, slice::ParallelSlice, @@ -77,11 +71,7 @@ use { }, serde::Serialize, solana_accounts_db::{ - account_overrides::AccountOverrides, - accounts::{ - AccountAddressFilter, Accounts, LoadedTransaction, PubkeyAccountSlot, - TransactionLoadResult, - }, + accounts::{AccountAddressFilter, Accounts, PubkeyAccountSlot, TransactionLoadResult}, accounts_db::{ AccountShrinkThreshold, 
AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, @@ -95,18 +85,12 @@ use { ancestors::{Ancestors, AncestorsForSerialization}, blockhash_queue::BlockhashQueue, epoch_accounts_hash::EpochAccountsHash, - nonce_info::{NonceInfo, NoncePartial}, partitioned_rewards::PartitionedEpochRewardsConfig, - rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, sorted_storages::SortedStorages, - stake_rewards::{RewardInfo, StakeReward}, + stake_rewards::StakeReward, storable_accounts::StorableAccounts, - transaction_error_metrics::TransactionErrorMetrics, transaction_results::{ - inner_instructions_list_from_instruction_trace, DurableNonceFee, - TransactionCheckResult, TransactionExecutionDetails, TransactionExecutionResult, - TransactionResults, + TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, }, }, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, @@ -115,27 +99,17 @@ use { solana_measure::{measure, measure::Measure, measure_us}, solana_perf::perf_libs, solana_program_runtime::{ - compute_budget::ComputeBudget, compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, - loaded_programs::{ - LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, - ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, - }, - log_collector::LogCollector, - message_processor::MessageProcessor, - sysvar_cache::SysvarCache, - timings::{ExecuteDetailsTimings, ExecuteTimingType, ExecuteTimings}, + loaded_programs::{LoadedProgram, LoadedProgramType, LoadedPrograms}, + timings::{ExecuteTimingType, ExecuteTimings}, }, solana_sdk::{ account::{ create_account_shared_data_with_fields as create_account, create_executable_meta, from_account, Account, AccountSharedData, InheritableAccountFields, 
ReadableAccount, - WritableAccount, PROGRAM_OWNERS, + WritableAccount, }, - account_utils::StateMut, - bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, DEFAULT_TICKS_PER_SECOND, INITIAL_RENT_EPOCH, MAX_PROCESSING_AGE, @@ -144,10 +118,6 @@ use { UPDATED_HASHES_PER_TICK4, UPDATED_HASHES_PER_TICK5, UPDATED_HASHES_PER_TICK6, }, epoch_info::EpochInfo, - epoch_rewards_partition_data::{ - get_epoch_rewards_partition_data_address, EpochRewardsPartitionDataVersion, - PartitionData, - }, epoch_schedule::EpochSchedule, feature, feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, @@ -159,17 +129,19 @@ use { incinerator, inflation::Inflation, inner_instruction::InnerInstructions, - instruction::InstructionError, - loader_v4::{self, LoaderV4State, LoaderV4Status}, message::{AccountKeys, SanitizedMessage}, native_loader, native_token::LAMPORTS_PER_SOL, nonce::{self, state::DurableNonce, NONCED_TX_MARKER_IX_INDEX}, nonce_account, + nonce_info::{NonceInfo, NoncePartial}, packet::PACKET_DATA_SIZE, precompiles::get_precompiles, pubkey::Pubkey, rent::RentDue, + rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, + rent_debits::RentDebits, + reward_info::RewardInfo, saturating_add_assign, signature::{Keypair, Signature}, slot_hashes::SlotHashes, @@ -182,30 +154,35 @@ use { self, MessageHash, Result, SanitizedTransaction, Transaction, TransactionError, TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, - transaction_context::{ - ExecutionRecord, TransactionAccount, TransactionContext, TransactionReturnData, - }, + transaction_context::{TransactionAccount, TransactionReturnData}, }, solana_stake_program::stake_state::{ self, InflationPointCalculationEvent, PointValue, StakeStateV2, }, + solana_svm::{ + account_loader::TransactionCheckResult, + account_overrides::AccountOverrides, + runtime_config::RuntimeConfig, 
+ transaction_error_metrics::TransactionErrorMetrics, + transaction_processor::{ + TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, + }, + }, solana_system_program::{get_system_account_kind, SystemAccountKind}, solana_vote::vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, solana_vote_program::vote_state::VoteState, std::{ borrow::Cow, - cell::RefCell, - collections::{hash_map::Entry, HashMap, HashSet}, + collections::{HashMap, HashSet}, convert::TryFrom, fmt, mem, ops::{AddAssign, RangeInclusive}, path::PathBuf, - rc::Rc, slice, sync::{ atomic::{ AtomicBool, AtomicI64, AtomicU64, AtomicUsize, - Ordering::{self, AcqRel, Acquire, Relaxed}, + Ordering::{AcqRel, Acquire, Relaxed}, }, Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, @@ -314,14 +291,6 @@ impl BankRc { } } -enum ProgramAccountLoadResult { - AccountNotFound, - InvalidAccountData(ProgramRuntimeEnvironment), - ProgramOfLoaderV1orV2(AccountSharedData), - ProgramOfLoaderV3(AccountSharedData, AccountSharedData, Slot), - ProgramOfLoaderV4(AccountSharedData, Slot), -} - pub struct LoadAndExecuteTransactionsOutput { pub loaded_transactions: Vec, // Vector of results indicating whether a transaction was executed or could not @@ -339,13 +308,6 @@ pub struct LoadAndExecuteTransactionsOutput { pub error_counters: TransactionErrorMetrics, } -pub struct LoadAndExecuteSanitizedTransactionsOutput { - pub loaded_transactions: Vec, - // Vector of results indicating whether a transaction was executed or could not - // be executed. Note executed transactions can still have failed! 
- pub execution_results: Vec, -} - pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -370,9 +332,6 @@ impl TransactionBalancesSet { } pub type TransactionBalances = Vec>; -/// A list of log messages emitted during a transaction -pub type TransactionLogMessages = Vec; - #[derive(Serialize, Deserialize, AbiExample, AbiEnumVisitor, Debug, PartialEq, Eq)] pub enum TransactionLogCollectorFilter { All, @@ -576,15 +535,14 @@ impl PartialEq for Bank { freeze_started: _, vote_only_bank: _, cost_tracker: _, - sysvar_cache: _, accounts_data_size_initial: _, accounts_data_size_delta_on_chain: _, accounts_data_size_delta_off_chain: _, fee_structure: _, incremental_snapshot_persistence: _, loaded_programs_cache: _, - check_program_modification_slot: _, epoch_reward_status: _, + transaction_processor: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. @@ -773,7 +731,7 @@ pub struct Bank { rent_collector: RentCollector, /// initialized from genesis - epoch_schedule: EpochSchedule, + pub(crate) epoch_schedule: EpochSchedule, /// inflation specs inflation: Arc>, @@ -792,7 +750,7 @@ pub struct Bank { builtin_programs: HashSet, /// Optional config parameters that can override runtime behavior - runtime_config: Arc, + pub(crate) runtime_config: Arc, /// Protocol-level rewards that were distributed by this bank pub rewards: RwLock>, @@ -824,8 +782,6 @@ pub struct Bank { cost_tracker: RwLock, - sysvar_cache: RwLock, - /// The initial accounts data size at the start of this Bank, before processing any transactions/etc accounts_data_size_initial: u64, /// The change to accounts data size in this Bank, due on-chain events (i.e. 
transactions) @@ -844,9 +800,9 @@ pub struct Bank { pub loaded_programs_cache: Arc>>, - pub check_program_modification_slot: bool, - epoch_reward_status: EpochRewardStatus, + + transaction_processor: TransactionBatchProcessor, } struct VoteWithStakeDelegations { @@ -904,7 +860,6 @@ struct PartitionedRewardsCalculation { foundation_rate: f64, prev_epoch_duration_in_years: f64, capitalization: u64, - parent_blockhash: Hash, } /// result of calculating the stake rewards at beginning of new epoch @@ -922,8 +877,6 @@ struct CalculateRewardsAndDistributeVoteRewardsResult { distributed_rewards: u64, /// stake rewards that still need to be distributed, grouped by partition stake_rewards_by_partition: Vec, - /// blockhash of parent, used to create EpochRewardsHasher - parent_blockhash: Hash, } pub(crate) type StakeRewards = Vec; @@ -1026,7 +979,6 @@ impl Bank { freeze_started: AtomicBool::default(), vote_only_bank: false, cost_tracker: RwLock::::default(), - sysvar_cache: RwLock::::default(), accounts_data_size_initial: 0, accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), @@ -1035,10 +987,19 @@ impl Bank { Slot::default(), Epoch::default(), ))), - check_program_modification_slot: false, epoch_reward_status: EpochRewardStatus::default(), + transaction_processor: TransactionBatchProcessor::default(), }; + bank.transaction_processor = TransactionBatchProcessor::new( + bank.slot, + bank.epoch, + bank.epoch_schedule.clone(), + bank.fee_structure.clone(), + bank.runtime_config.clone(), + bank.loaded_programs_cache.clone(), + ); + let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; bank.accounts_data_size_initial = accounts_data_size_initial; @@ -1339,16 +1300,24 @@ impl Bank { )), freeze_started: AtomicBool::new(false), cost_tracker: RwLock::new(CostTracker::default()), - sysvar_cache: RwLock::new(SysvarCache::default()), accounts_data_size_initial, accounts_data_size_delta_on_chain: 
AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: parent.fee_structure.clone(), loaded_programs_cache: parent.loaded_programs_cache.clone(), - check_program_modification_slot: false, epoch_reward_status: parent.epoch_reward_status.clone(), + transaction_processor: TransactionBatchProcessor::default(), }; + new.transaction_processor = TransactionBatchProcessor::new( + new.slot, + new.epoch, + new.epoch_schedule.clone(), + new.fee_structure.clone(), + new.runtime_config.clone(), + new.loaded_programs_cache.clone(), + ); + let (_, ancestors_time_us) = measure_us!({ let mut ancestors = Vec::with_capacity(1 + new.parents().len()); ancestors.push(new.slot()); @@ -1615,7 +1584,6 @@ impl Bank { total_rewards, distributed_rewards, stake_rewards_by_partition, - parent_blockhash, } = self.calculate_rewards_and_distribute_vote_rewards( parent_epoch, reward_calc_tracer, @@ -1623,11 +1591,9 @@ impl Bank { rewards_metrics, ); - let num_partitions = stake_rewards_by_partition.len(); - let slot = self.slot(); let credit_start = self.block_height() + self.get_reward_calculation_num_blocks(); - let credit_end_exclusive = credit_start + num_partitions as u64; + let credit_end_exclusive = credit_start + stake_rewards_by_partition.len() as u64; self.set_epoch_reward_status_active(stake_rewards_by_partition); @@ -1635,8 +1601,6 @@ impl Bank { // (total_rewards, distributed_rewards, credit_end_exclusive), total capital will increase by (total_rewards - distributed_rewards) self.create_epoch_rewards_sysvar(total_rewards, distributed_rewards, credit_end_exclusive); - self.create_epoch_rewards_partition_data_account(num_partitions, parent_blockhash); - datapoint_info!( "epoch-rewards-status-update", ("start_slot", slot, i64), @@ -1843,7 +1807,6 @@ impl Bank { freeze_started: AtomicBool::new(fields.hash != Hash::default()), vote_only_bank: false, cost_tracker: RwLock::new(CostTracker::default()), - sysvar_cache: RwLock::new(SysvarCache::default()), 
accounts_data_size_initial, accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), @@ -1852,9 +1815,19 @@ impl Bank { fields.slot, fields.epoch, ))), - check_program_modification_slot: false, epoch_reward_status: fields.epoch_reward_status, + transaction_processor: TransactionBatchProcessor::default(), }; + + bank.transaction_processor = TransactionBatchProcessor::new( + bank.slot, + bank.epoch, + bank.epoch_schedule.clone(), + bank.fee_structure.clone(), + bank.runtime_config.clone(), + bank.loaded_programs_cache.clone(), + ); + bank.finish_init( genesis_config, additional_builtins, @@ -2402,7 +2375,6 @@ impl Bank { foundation_rate, prev_epoch_duration_in_years, capitalization, - parent_blockhash, } } @@ -2423,7 +2395,6 @@ impl Bank { foundation_rate, prev_epoch_duration_in_years, capitalization, - parent_blockhash, } = self.calculate_rewards_for_partitioning( prev_epoch, reward_calc_tracer, @@ -2493,7 +2464,6 @@ impl Bank { total_rewards: validator_rewards_paid + total_stake_rewards_lamports, distributed_rewards: validator_rewards_paid, stake_rewards_by_partition, - parent_blockhash, } } @@ -3340,7 +3310,7 @@ impl Bank { let pre_lamport = curr_stake_account.lamports(); let post_lamport = post_stake_account.lamports(); assert_eq!(pre_lamport + u64::try_from(reward_amount).unwrap(), post_lamport, - "stake account balance has changed since the reward calculation! account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}"); + "stake account balance has changed since the reward calculation! 
account: {stake_pubkey}, pre balance: {pre_lamport}, post balance: {post_lamport}, rewards: {reward_amount}"); } } } @@ -3607,40 +3577,6 @@ impl Bank { self.log_epoch_rewards_sysvar("update"); } - /// Create the persistent PDA containing the epoch-rewards data - fn create_epoch_rewards_partition_data_account( - &self, - num_partitions: usize, - parent_blockhash: Hash, - ) { - let epoch_rewards_partition_data = EpochRewardsPartitionDataVersion::V0(PartitionData { - num_partitions, - parent_blockhash, - }); - let address = get_epoch_rewards_partition_data_address(self.epoch()); - - let data_len = bincode::serialized_size(&epoch_rewards_partition_data).unwrap() as usize; - let account_balance = self.get_minimum_balance_for_rent_exemption(data_len); - let new_account = AccountSharedData::new_data( - account_balance, - &epoch_rewards_partition_data, - &solana_sdk::sysvar::id(), - ) - .unwrap(); - - info!( - "create epoch rewards partition data account {} {address} \ - {epoch_rewards_partition_data:?}", - self.slot - ); - - // Skip storing data account when we are testing partitioned - // rewards but feature is not yet active - if !self.force_partition_rewards_in_first_block_of_epoch() { - self.store_account_and_update_capitalization(&address, &new_account); - } - } - fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { @@ -4436,10 +4372,6 @@ impl Bank { self.rc.accounts.accounts_db.remove_unrooted_slots(slots) } - pub fn set_shrink_paths(&self, paths: Vec) { - self.rc.accounts.accounts_db.set_shrink_paths(paths); - } - fn check_age( &self, sanitized_txs: &[impl core::borrow::Borrow], @@ -4593,227 +4525,6 @@ impl Bank { balances } - fn program_modification_slot(&self, pubkey: &Pubkey) -> Result { - let program = self - .get_account_with_fixed_root(pubkey) - .ok_or(TransactionError::ProgramAccountNotFound)?; - if 
bpf_loader_upgradeable::check_id(program.owner()) { - if let Ok(UpgradeableLoaderState::Program { - programdata_address, - }) = program.state() - { - let programdata = self - .get_account_with_fixed_root(&programdata_address) - .ok_or(TransactionError::ProgramAccountNotFound)?; - if let Ok(UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: _, - }) = programdata.state() - { - return Ok(slot); - } - } - Err(TransactionError::ProgramAccountNotFound) - } else if loader_v4::check_id(program.owner()) { - let state = solana_loader_v4_program::get_state(program.data()) - .map_err(|_| TransactionError::ProgramAccountNotFound)?; - Ok(state.slot) - } else { - Ok(0) - } - } - - fn load_program_accounts( - &self, - pubkey: &Pubkey, - environments: &ProgramRuntimeEnvironments, - ) -> ProgramAccountLoadResult { - let program_account = match self.get_account_with_fixed_root(pubkey) { - None => return ProgramAccountLoadResult::AccountNotFound, - Some(account) => account, - }; - - debug_assert!(solana_bpf_loader_program::check_loader_id( - program_account.owner() - )); - - if loader_v4::check_id(program_account.owner()) { - return solana_loader_v4_program::get_state(program_account.data()) - .ok() - .and_then(|state| { - (!matches!(state.status, LoaderV4Status::Retracted)).then_some(state.slot) - }) - .map(|slot| ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot)) - .unwrap_or(ProgramAccountLoadResult::InvalidAccountData( - environments.program_runtime_v2.clone(), - )); - } - - if !bpf_loader_upgradeable::check_id(program_account.owner()) { - return ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account); - } - - if let Ok(UpgradeableLoaderState::Program { - programdata_address, - }) = program_account.state() - { - let programdata_account = match self.get_account_with_fixed_root(&programdata_address) { - None => return ProgramAccountLoadResult::AccountNotFound, - Some(account) => account, - }; - - if let 
Ok(UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: _, - }) = programdata_account.state() - { - return ProgramAccountLoadResult::ProgramOfLoaderV3( - program_account, - programdata_account, - slot, - ); - } - } - ProgramAccountLoadResult::InvalidAccountData(environments.program_runtime_v1.clone()) - } - - fn load_program_from_bytes( - load_program_metrics: &mut LoadProgramMetrics, - programdata: &[u8], - loader_key: &Pubkey, - account_size: usize, - deployment_slot: Slot, - program_runtime_environment: ProgramRuntimeEnvironment, - reloading: bool, - ) -> std::result::Result> { - if reloading { - // Safety: this is safe because the program is being reloaded in the cache. - unsafe { - LoadedProgram::reload( - loader_key, - program_runtime_environment.clone(), - deployment_slot, - deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - programdata, - account_size, - load_program_metrics, - ) - } - } else { - LoadedProgram::new( - loader_key, - program_runtime_environment.clone(), - deployment_slot, - deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), - None, - programdata, - account_size, - load_program_metrics, - ) - } - } - - pub fn load_program( - &self, - pubkey: &Pubkey, - reload: bool, - recompile: Option>, - ) -> Arc { - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let effective_epoch = if recompile.is_some() { - loaded_programs_cache.latest_root_epoch.saturating_add(1) - } else { - self.epoch - }; - let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); - let mut load_program_metrics = LoadProgramMetrics { - program_id: pubkey.to_string(), - ..LoadProgramMetrics::default() - }; - - let mut loaded_program = match self.load_program_accounts(pubkey, environments) { - ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( - self.slot, - LoadedProgramType::Closed, - )), - - ProgramAccountLoadResult::InvalidAccountData(env) => 
Err((self.slot, env)), - - ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { - Self::load_program_from_bytes( - &mut load_program_metrics, - program_account.data(), - program_account.owner(), - program_account.data().len(), - 0, - environments.program_runtime_v1.clone(), - reload, - ) - .map_err(|_| (0, environments.program_runtime_v1.clone())) - } - - ProgramAccountLoadResult::ProgramOfLoaderV3( - program_account, - programdata_account, - slot, - ) => programdata_account - .data() - .get(UpgradeableLoaderState::size_of_programdata_metadata()..) - .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|programdata| { - Self::load_program_from_bytes( - &mut load_program_metrics, - programdata, - program_account.owner(), - program_account - .data() - .len() - .saturating_add(programdata_account.data().len()), - slot, - environments.program_runtime_v1.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v1.clone())), - - ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => program_account - .data() - .get(LoaderV4State::program_data_offset()..) 
- .ok_or(Box::new(InstructionError::InvalidAccountData).into()) - .and_then(|elf_bytes| { - Self::load_program_from_bytes( - &mut load_program_metrics, - elf_bytes, - &loader_v4::id(), - program_account.data().len(), - slot, - environments.program_runtime_v2.clone(), - reload, - ) - }) - .map_err(|_| (slot, environments.program_runtime_v2.clone())), - } - .unwrap_or_else(|(slot, env)| { - LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) - }); - - let mut timings = ExecuteDetailsTimings::default(); - load_program_metrics.submit_datapoint(&mut timings); - if let Some(recompile) = recompile { - loaded_program.effective_slot = loaded_program.effective_slot.max( - self.epoch_schedule() - .get_first_slot_in_epoch(effective_epoch), - ); - loaded_program.tx_usage_counter = - AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); - loaded_program.ix_usage_counter = - AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); - } - loaded_program.update_access_slot(self.slot()); - Arc::new(loaded_program) - } - pub fn clear_program_cache(&self) { self.loaded_programs_cache .write() @@ -4821,311 +4532,6 @@ impl Bank { .unload_all_programs(); } - /// Execute a transaction using the provided loaded accounts and update - /// the executors cache if the transaction was successful. 
- #[allow(clippy::too_many_arguments)] - fn execute_loaded_transaction( - &self, - tx: &SanitizedTransaction, - loaded_transaction: &mut LoadedTransaction, - compute_budget: ComputeBudget, - durable_nonce_fee: Option, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, - timings: &mut ExecuteTimings, - error_counters: &mut TransactionErrorMetrics, - log_messages_bytes_limit: Option, - programs_loaded_for_tx_batch: &LoadedProgramsForTxBatch, - ) -> TransactionExecutionResult { - let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts); - - fn transaction_accounts_lamports_sum( - accounts: &[(Pubkey, AccountSharedData)], - message: &SanitizedMessage, - ) -> Option { - let mut lamports_sum = 0u128; - for i in 0..message.account_keys().len() { - let (_, account) = accounts.get(i)?; - lamports_sum = lamports_sum.checked_add(u128::from(account.lamports()))?; - } - Some(lamports_sum) - } - - let lamports_before_tx = - transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); - - let mut transaction_context = TransactionContext::new( - transaction_accounts, - self.rent_collector.rent.clone(), - compute_budget.max_invoke_stack_height, - compute_budget.max_instruction_trace_length, - ); - #[cfg(debug_assertions)] - transaction_context.set_signature(tx.signature()); - - let pre_account_state_info = TransactionAccountStateInfo::new( - &self.rent_collector.rent, - &transaction_context, - tx.message(), - ); - - let log_collector = if enable_log_recording { - match log_messages_bytes_limit { - None => Some(LogCollector::new_ref()), - Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some( - log_messages_bytes_limit, - ))), - } - } else { - None - }; - - let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); - - let mut executed_units = 0u64; - let mut programs_modified_by_tx = LoadedProgramsForTxBatch::new( - self.slot, - 
programs_loaded_for_tx_batch.environments.clone(), - ); - let mut process_message_time = Measure::start("process_message_time"); - let process_result = MessageProcessor::process_message( - tx.message(), - &loaded_transaction.program_indices, - &mut transaction_context, - log_collector.clone(), - programs_loaded_for_tx_batch, - &mut programs_modified_by_tx, - self.feature_set.clone(), - compute_budget, - timings, - &self.sysvar_cache.read().unwrap(), - blockhash, - lamports_per_signature, - &mut executed_units, - ); - process_message_time.stop(); - - saturating_add_assign!( - timings.execute_accessories.process_message_us, - process_message_time.as_us() - ); - - let mut status = process_result - .and_then(|info| { - let post_account_state_info = TransactionAccountStateInfo::new( - &self.rent_collector.rent, - &transaction_context, - tx.message(), - ); - TransactionAccountStateInfo::verify_changes( - &pre_account_state_info, - &post_account_state_info, - &transaction_context, - ) - .map(|_| info) - }) - .map_err(|err| { - match err { - TransactionError::InvalidRentPayingAccount - | TransactionError::InsufficientFundsForRent { .. 
} => { - error_counters.invalid_rent_paying_account += 1; - } - TransactionError::InvalidAccountIndex => { - error_counters.invalid_account_index += 1; - } - _ => { - error_counters.instruction_error += 1; - } - } - err - }); - - let log_messages: Option = - log_collector.and_then(|log_collector| { - Rc::try_unwrap(log_collector) - .map(|log_collector| log_collector.into_inner().into_messages()) - .ok() - }); - - let inner_instructions = if enable_cpi_recording { - Some(inner_instructions_list_from_instruction_trace( - &transaction_context, - )) - } else { - None - }; - - let ExecutionRecord { - accounts, - return_data, - touched_account_count, - accounts_resize_delta: accounts_data_len_delta, - } = transaction_context.into(); - - if status.is_ok() - && transaction_accounts_lamports_sum(&accounts, tx.message()) - .filter(|lamports_after_tx| lamports_before_tx == *lamports_after_tx) - .is_none() - { - status = Err(TransactionError::UnbalancedTransaction); - } - let status = status.map(|_| ()); - - loaded_transaction.accounts = accounts; - saturating_add_assign!( - timings.details.total_account_count, - loaded_transaction.accounts.len() as u64 - ); - saturating_add_assign!(timings.details.changed_account_count, touched_account_count); - - let return_data = if enable_return_data_recording && !return_data.data.is_empty() { - Some(return_data) - } else { - None - }; - - TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - durable_nonce_fee, - return_data, - executed_units, - accounts_data_len_delta, - }, - programs_modified_by_tx: Box::new(programs_modified_by_tx), - } - } - - fn replenish_program_cache( - &self, - program_accounts_map: &HashMap, - ) -> LoadedProgramsForTxBatch { - let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = - if self.check_program_modification_slot { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - ( - *pubkey, - ( - 
self.program_modification_slot(pubkey) - .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { - LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) - }), - *count, - ), - ) - }) - .collect() - } else { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - (*pubkey, (LoadedProgramMatchCriteria::NoCriteria, *count)) - }) - .collect() - }; - - let mut loaded_programs_for_txs = None; - let mut program_to_store = None; - loop { - let (program_to_load, task_cookie, task_waiter) = { - // Lock the global cache. - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - // Initialize our local cache. - if loaded_programs_for_txs.is_none() { - loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( - self.slot, - loaded_programs_cache - .get_environments_for_epoch(self.epoch) - .clone(), - )); - } - // Submit our last completed loading task. - if let Some((key, program)) = program_to_store.take() { - loaded_programs_cache.finish_cooperative_loading_task( - self.slot(), - key, - program, - ); - } - // Figure out which program needs to be loaded next. - let program_to_load = loaded_programs_cache.extract( - &mut missing_programs, - loaded_programs_for_txs.as_mut().unwrap(), - ); - let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); - (program_to_load, task_waiter.cookie(), task_waiter) - // Unlock the global cache again. - }; - - if let Some((key, count)) = program_to_load { - // Load, verify and compile one program. - let program = self.load_program(&key, false, None); - program.tx_usage_counter.store(count, Ordering::Relaxed); - program_to_store = Some((key, program)); - } else if missing_programs.is_empty() { - break; - } else { - // Sleep until the next finish_cooperative_loading_task() call. - // Once a task completes we'll wake up and try to load the - // missing programs inside the tx batch again. 
- let _new_cookie = task_waiter.wait(task_cookie); - } - } - - loaded_programs_for_txs.unwrap() - } - - /// Returns a hash map of executable program accounts (program accounts that are not writable - /// in the given transactions), and their owners, for the transactions with a valid - /// blockhash or nonce. - fn filter_executable_program_accounts<'a>( - &self, - ancestors: &Ancestors, - txs: &[SanitizedTransaction], - lock_results: &mut [TransactionCheckResult], - program_owners: &'a [Pubkey], - ) -> HashMap { - let mut result: HashMap = HashMap::new(); - lock_results.iter_mut().zip(txs).for_each(|etx| { - if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { - if lamports_per_signature.is_some() { - tx.message() - .account_keys() - .iter() - .for_each(|key| match result.entry(*key) { - Entry::Occupied(mut entry) => { - let (_, count) = entry.get_mut(); - saturating_add_assign!(*count, 1); - } - Entry::Vacant(entry) => { - if let Ok(index) = self - .rc - .accounts - .accounts_db - .account_matches_owners(ancestors, key, program_owners) - { - program_owners - .get(index) - .map(|owner| entry.insert((owner, 1))); - } - } - }); - } else { - // If the transaction's nonce account was not valid, and blockhash is not found, - // the transaction will fail to process. Let's not load any programs from the - // transaction, and update the status of the transaction. 
- *etx.0 = (Err(TransactionError::BlockhashNotFound), None, None); - } - } - }); - result - } - #[allow(clippy::type_complexity)] pub fn load_and_execute_transactions( &self, @@ -5189,17 +4595,21 @@ impl Bank { debug!("check: {}us", check_time.as_us()); timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_time.as_us()); - let sanitized_output = self.load_and_execute_sanitized_transactions( - sanitized_txs, - &mut check_results, - &mut error_counters, - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, - timings, - account_overrides, - log_messages_bytes_limit, - ); + let sanitized_output = self + .transaction_processor + .load_and_execute_sanitized_transactions( + self, + sanitized_txs, + &mut check_results, + &mut error_counters, + enable_cpi_recording, + enable_log_recording, + enable_return_data_recording, + timings, + account_overrides, + self.builtin_programs.iter(), + log_messages_bytes_limit, + ); let mut signature_count = 0; @@ -5328,141 +4738,6 @@ impl Bank { } } - #[allow(clippy::too_many_arguments)] - fn load_and_execute_sanitized_transactions( - &self, - sanitized_txs: &[SanitizedTransaction], - check_results: &mut [TransactionCheckResult], - error_counters: &mut TransactionErrorMetrics, - enable_cpi_recording: bool, - enable_log_recording: bool, - enable_return_data_recording: bool, - timings: &mut ExecuteTimings, - account_overrides: Option<&AccountOverrides>, - log_messages_bytes_limit: Option, - ) -> LoadAndExecuteSanitizedTransactionsOutput { - let mut program_accounts_map = self.filter_executable_program_accounts( - &self.ancestors, - sanitized_txs, - check_results, - PROGRAM_OWNERS, - ); - let native_loader = native_loader::id(); - for builtin_program in self.builtin_programs.iter() { - program_accounts_map.insert(*builtin_program, (&native_loader, 0)); - } - - let programs_loaded_for_tx_batch = Rc::new(RefCell::new( - self.replenish_program_cache(&program_accounts_map), - )); - - let mut load_time = 
Measure::start("accounts_load"); - let mut loaded_transactions = load_accounts( - &self.rc.accounts.accounts_db, - &self.ancestors, - sanitized_txs, - check_results, - error_counters, - &self.rent_collector, - &self.feature_set, - &self.fee_structure, - account_overrides, - self.get_reward_interval(), - &program_accounts_map, - &programs_loaded_for_tx_batch.borrow(), - ); - load_time.stop(); - - let mut execution_time = Measure::start("execution_time"); - - let execution_results: Vec = loaded_transactions - .iter_mut() - .zip(sanitized_txs.iter()) - .map(|(accs, tx)| match accs { - (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), - (Ok(loaded_transaction), nonce) => { - let compute_budget = - if let Some(compute_budget) = self.runtime_config.compute_budget { - compute_budget - } else { - let mut compute_budget_process_transaction_time = - Measure::start("compute_budget_process_transaction_time"); - let maybe_compute_budget = ComputeBudget::try_from_instructions( - tx.message().program_instructions_iter(), - ); - compute_budget_process_transaction_time.stop(); - saturating_add_assign!( - timings - .execute_accessories - .compute_budget_process_transaction_us, - compute_budget_process_transaction_time.as_us() - ); - if let Err(err) = maybe_compute_budget { - return TransactionExecutionResult::NotExecuted(err); - } - maybe_compute_budget.unwrap() - }; - - let result = self.execute_loaded_transaction( - tx, - loaded_transaction, - compute_budget, - nonce.as_ref().map(DurableNonceFee::from), - enable_cpi_recording, - enable_log_recording, - enable_return_data_recording, - timings, - error_counters, - log_messages_bytes_limit, - &programs_loaded_for_tx_batch.borrow(), - ); - - if let TransactionExecutionResult::Executed { - details, - programs_modified_by_tx, - } = &result - { - // Update batch specific cache of the loaded programs with the modifications - // made by the transaction, if it executed successfully. 
- if details.status.is_ok() { - programs_loaded_for_tx_batch - .borrow_mut() - .merge(programs_modified_by_tx); - } - } - - result - } - }) - .collect(); - - execution_time.stop(); - - const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; - self.loaded_programs_cache - .write() - .unwrap() - .evict_using_2s_random_selection( - Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), - self.slot(), - ); - - debug!( - "load: {}us execute: {}us txs_len={}", - load_time.as_us(), - execution_time.as_us(), - sanitized_txs.len(), - ); - - timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); - timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); - - LoadAndExecuteSanitizedTransactionsOutput { - loaded_transactions, - execution_results, - } - } - /// Load the accounts data size, in bytes pub fn load_accounts_data_size(&self) -> u64 { self.accounts_data_size_initial @@ -5636,7 +4911,6 @@ impl Bank { sanitized_txs, &execution_results, loaded_txs, - &self.rent_collector, &durable_nonce, lamports_per_signature, ); @@ -5965,15 +5239,12 @@ impl Bank { .accounts .accounts_db .test_skip_rewrites_but_include_in_bank_hash; - let set_exempt_rent_epoch_max: bool = self - .feature_set - .is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let rent_collected_info = if self.should_collect_rent() { let (rent_collected_info, measure) = measure!(self .rent_collector - .collect_from_existing_account(pubkey, account, set_exempt_rent_epoch_max,)); + .collect_from_existing_account(pubkey, account)); time_collecting_rent_us += measure.as_us(); rent_collected_info } else { @@ -5981,9 +5252,8 @@ impl Bank { // are any rent paying accounts, their `rent_epoch` won't change either. However, if the // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its // `rent_epoch` to u64::MAX. 
In such case, the behavior stays the same as before. - if set_exempt_rent_epoch_max - && (account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && self.rent_collector.get_rent_due(account) == RentDue::Exempt) + if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && self.rent_collector.get_rent_due(account) == RentDue::Exempt { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } @@ -6769,10 +6039,18 @@ impl Bank { // pro: safer assertion can be enabled inside AccountsDb // con: panics!() if called from off-chain processing pub fn get_account_with_fixed_root(&self, pubkey: &Pubkey) -> Option { - self.load_slow_with_fixed_root(&self.ancestors, pubkey) + self.get_account_modified_slot_with_fixed_root(pubkey) .map(|(acc, _slot)| acc) } + // See note above get_account_with_fixed_root() about when to prefer this function + pub fn get_account_modified_slot_with_fixed_root( + &self, + pubkey: &Pubkey, + ) -> Option<(AccountSharedData, Slot)> { + self.load_slow_with_fixed_root(&self.ancestors, pubkey) + } + pub fn get_account_modified_slot(&self, pubkey: &Pubkey) -> Option<(AccountSharedData, Slot)> { self.load_slow(&self.ancestors, pubkey) } @@ -8192,7 +7470,7 @@ impl Bank { pub fn is_in_slot_hashes_history(&self, slot: &Slot) -> bool { if slot < &self.slot { - if let Ok(sysvar_cache) = self.sysvar_cache.read() { + if let Ok(sysvar_cache) = self.transaction_processor.sysvar_cache.read() { if let Ok(slot_hashes) = sysvar_cache.get_slot_hashes() { return slot_hashes.get(slot).is_some(); } @@ -8200,6 +7478,70 @@ impl Bank { } false } + + pub fn check_program_modification_slot(&mut self) { + self.transaction_processor.check_program_modification_slot = true; + } + + pub fn load_program( + &self, + pubkey: &Pubkey, + reload: bool, + recompile: Option>, + ) -> Arc { + self.transaction_processor + .load_program(self, pubkey, reload, recompile) + } +} + +impl TransactionProcessingCallback for Bank { + fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { + 
self.rc + .accounts + .accounts_db + .account_matches_owners(&self.ancestors, account, owners) + .ok() + } + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + self.rc + .accounts + .accounts_db + .load_with_fixed_root(&self.ancestors, pubkey) + .map(|(acc, _)| acc) + } + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { + self.last_blockhash_and_lamports_per_signature() + } + + fn get_rent_collector(&self) -> &RentCollector { + &self.rent_collector + } + + fn get_feature_set(&self) -> Arc { + self.feature_set.clone() + } + + fn check_account_access( + &self, + tx: &SanitizedTransaction, + account_index: usize, + account: &AccountSharedData, + error_counters: &mut TransactionErrorMetrics, + ) -> Result<()> { + if self.get_reward_interval() == RewardInterval::InsideInterval + && tx.message().is_writable(account_index) + && solana_stake_program::check_id(account.owner()) + { + error_counters.program_execution_temporarily_restricted += 1; + Err(TransactionError::ProgramExecutionTemporarilyRestricted { + account_index: account_index as u8, + }) + } else { + Ok(()) + } + } } #[cfg(feature = "dev-context-only-utils")] diff --git a/runtime/src/bank/address_lookup_table.rs b/runtime/src/bank/address_lookup_table.rs index 07c82acf6da8b1..483ec7cea00ea1 100644 --- a/runtime/src/bank/address_lookup_table.rs +++ b/runtime/src/bank/address_lookup_table.rs @@ -16,6 +16,7 @@ impl AddressLoader for &Bank { address_table_lookups: &[MessageAddressTableLookup], ) -> Result { let slot_hashes = self + .transaction_processor .sysvar_cache .read() .unwrap() diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 85d68c07fd7448..5a53a1278881fa 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -1,14 +1,14 @@ use { super::Bank, - crate::svm::account_rent_state::RentState, log::{debug, warn}, - solana_accounts_db::stake_rewards::RewardInfo, solana_sdk::{ 
account::{ReadableAccount, WritableAccount}, pubkey::Pubkey, + reward_info::RewardInfo, reward_type::RewardType, system_program, }, + solana_svm::account_rent_state::RentState, solana_vote::vote_account::VoteAccountsHashMap, std::{result::Result, sync::atomic::Ordering::Relaxed}, thiserror::Error, diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index df51d31e568cee..8b78efbcf3e11a 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -7,7 +7,6 @@ mod tests { StartBlockHeightAndRewards, }, genesis_utils::activate_all_features, - runtime_config::RuntimeConfig, serde_snapshot::{ reserialize_bank_with_new_accounts_hash, BankIncrementalSnapshotPersistence, SerdeAccountsHash, SerdeIncrementalAccountsHash, SerdeStyle, SnapshotStreams, @@ -39,6 +38,7 @@ mod tests { pubkey::Pubkey, signature::{Keypair, Signer}, }, + solana_svm::runtime_config::RuntimeConfig, std::{ io::{Cursor, Read, Write}, num::NonZeroUsize, @@ -605,7 +605,7 @@ mod tests { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "12WNiuA7qeLU8JFweQszX5sCnCj1fYnYV4i9DeACqhQD")] + #[frozen_abi(digest = "7BH2s2Y1yKy396c3ixC4TTyvvpkyenAvWDSiZvY5yb7P")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index d6131695fb7d5f..91a22907d6e888 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -5,7 +5,7 @@ use { impl Bank { pub(crate) fn fill_missing_sysvar_cache_entries(&self) { - let mut sysvar_cache = self.sysvar_cache.write().unwrap(); + let mut sysvar_cache = self.transaction_processor.sysvar_cache.write().unwrap(); sysvar_cache.fill_missing_entries(|pubkey, callback| { if let Some(account) = self.get_account_with_fixed_root(pubkey) { callback(account.data()); @@ -14,12 +14,16 
@@ impl Bank { } pub(crate) fn reset_sysvar_cache(&self) { - let mut sysvar_cache = self.sysvar_cache.write().unwrap(); + let mut sysvar_cache = self.transaction_processor.sysvar_cache.write().unwrap(); sysvar_cache.reset(); } pub fn get_sysvar_cache_for_tests(&self) -> SysvarCache { - self.sysvar_cache.read().unwrap().clone() + self.transaction_processor + .sysvar_cache + .read() + .unwrap() + .clone() } } @@ -40,7 +44,7 @@ mod tests { let (genesis_config, _mint_keypair) = create_genesis_config(100_000); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); - let bank0_sysvar_cache = bank0.sysvar_cache.read().unwrap(); + let bank0_sysvar_cache = bank0.transaction_processor.sysvar_cache.read().unwrap(); let bank0_cached_clock = bank0_sysvar_cache.get_clock(); let bank0_cached_epoch_schedule = bank0_sysvar_cache.get_epoch_schedule(); let bank0_cached_fees = bank0_sysvar_cache.get_fees(); @@ -60,7 +64,7 @@ mod tests { bank1_slot, )); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = bank1_sysvar_cache.get_epoch_schedule(); let bank1_cached_fees = bank1_sysvar_cache.get_fees(); @@ -81,7 +85,7 @@ mod tests { let bank2_slot = bank1.slot() + 1; let bank2 = Bank::new_from_parent(bank1.clone(), &Pubkey::default(), bank2_slot); - let bank2_sysvar_cache = bank2.sysvar_cache.read().unwrap(); + let bank2_sysvar_cache = bank2.transaction_processor.sysvar_cache.read().unwrap(); let bank2_cached_clock = bank2_sysvar_cache.get_clock(); let bank2_cached_epoch_schedule = bank2_sysvar_cache.get_epoch_schedule(); let bank2_cached_fees = bank2_sysvar_cache.get_fees(); @@ -112,7 +116,7 @@ mod tests { let bank1_slot = bank0.slot() + 1; let mut bank1 = Bank::new_from_parent(bank0, &Pubkey::default(), bank1_slot); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let 
bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = bank1_sysvar_cache.get_epoch_schedule(); let bank1_cached_fees = bank1_sysvar_cache.get_fees(); @@ -130,7 +134,7 @@ mod tests { drop(bank1_sysvar_cache); bank1.reset_sysvar_cache(); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); assert!(bank1_sysvar_cache.get_clock().is_err()); assert!(bank1_sysvar_cache.get_epoch_schedule().is_err()); assert!(bank1_sysvar_cache.get_fees().is_err()); @@ -155,7 +159,7 @@ mod tests { bank1.fill_missing_sysvar_cache_entries(); - let bank1_sysvar_cache = bank1.sysvar_cache.read().unwrap(); + let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache.read().unwrap(); assert_eq!(bank1_sysvar_cache.get_clock(), bank1_cached_clock); assert_eq!( bank1_sysvar_cache.get_epoch_schedule(), diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index ad28005fccfe7e..f6db68d3c0c0c1 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -34,10 +34,8 @@ use { accounts_partition::{self, PartitionIndex, RentPayingAccountsByPartition}, ancestors::Ancestors, inline_spl_token, - nonce_info::NonceFull, partitioned_rewards::TestPartitionedEpochRewards, - rent_collector::RENT_EXEMPT_RENT_EPOCH, - transaction_error_metrics::TransactionErrorMetrics, + transaction_results::DurableNonceFee, }, solana_logger, solana_program_runtime::{ @@ -45,7 +43,10 @@ use { compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, declare_process_instruction, invoke_context::mock_process_instruction, - loaded_programs::{LoadedProgram, LoadedProgramType, DELAY_VISIBILITY_SLOT_OFFSET}, + loaded_programs::{ + LoadedProgram, LoadedProgramType, LoadedProgramsForTxBatch, + DELAY_VISIBILITY_SLOT_OFFSET, + }, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, 
timings::ExecuteTimings, }, @@ -80,11 +81,13 @@ use { native_loader, native_token::{sol_to_lamports, LAMPORTS_PER_SOL}, nonce::{self, state::DurableNonce}, + nonce_info::NonceFull, packet::PACKET_DATA_SIZE, poh_config::PohConfig, program::MAX_RETURN_DATA, pubkey::Pubkey, rent::Rent, + rent_collector::RENT_EXEMPT_RENT_EPOCH, reward_type::RewardType, secp256k1_program, signature::{keypair_from_seed, Keypair, Signature, Signer}, @@ -105,6 +108,10 @@ use { transaction_context::{TransactionAccount, TransactionContext}, }, solana_stake_program::stake_state::{self, StakeStateV2}, + solana_svm::{ + account_loader::load_accounts, transaction_account_state_info::TransactionAccountStateInfo, + transaction_error_metrics::TransactionErrorMetrics, + }, solana_vote_program::{ vote_instruction, vote_state::{ @@ -447,124 +454,120 @@ fn rent_with_exemption_threshold(exemption_threshold: f64) -> Rent { /// one thing being tested here is that a failed tx (due to rent collection using up all lamports) followed by rent collection /// results in the same state as if just rent collection ran (and emptied the accounts that have too few lamports) fn test_credit_debit_rent_no_side_effect_on_hash() { - for set_exempt_rent_epoch_max in [false, true] { - solana_logger::setup(); + solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config_no_tx_fee(10); + let (mut genesis_config, _mint_keypair) = create_genesis_config_no_tx_fee(10); - genesis_config.rent = rent_with_exemption_threshold(21.0); + genesis_config.rent = rent_with_exemption_threshold(21.0); - let slot = years_as_slots( - 2.0, - &genesis_config.poh_config.target_tick_duration, - genesis_config.ticks_per_slot, - ) as u64; - let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank = new_bank_from_parent_with_bank_forks( - bank_forks_1.as_ref(), - root_bank, - &Pubkey::default(), - slot, - ); + let slot = years_as_slots( + 2.0, + 
&genesis_config.poh_config.target_tick_duration, + genesis_config.ticks_per_slot, + ) as u64; + let (root_bank, bank_forks_1) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = new_bank_from_parent_with_bank_forks( + bank_forks_1.as_ref(), + root_bank, + &Pubkey::default(), + slot, + ); - let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); - let bank_with_success_txs = new_bank_from_parent_with_bank_forks( - bank_forks_2.as_ref(), - root_bank_2, - &Pubkey::default(), - slot, - ); + let (root_bank_2, bank_forks_2) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank_with_success_txs = new_bank_from_parent_with_bank_forks( + bank_forks_2.as_ref(), + root_bank_2, + &Pubkey::default(), + slot, + ); - assert_eq!(bank.last_blockhash(), genesis_config.hash()); - - let plenty_of_lamports = 264; - let too_few_lamports = 10; - // Initialize credit-debit and credit only accounts - let accounts = [ - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), - AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), - // Transaction between these two accounts will fail - AccountSharedData::new(too_few_lamports, 0, &Pubkey::default()), - AccountSharedData::new(too_few_lamports, 1, &Pubkey::default()), - ]; - - let keypairs = accounts.iter().map(|_| Keypair::new()).collect::>(); - { - // make sure rent and epoch change are such that we collect all lamports in accounts 4 & 5 - let mut account_copy = accounts[4].clone(); - let expected_rent = bank.rent_collector().collect_from_existing_account( - &keypairs[4].pubkey(), - &mut account_copy, - set_exempt_rent_epoch_max, - ); - assert_eq!(expected_rent.rent_amount, too_few_lamports); - assert_eq!(account_copy.lamports(), 0); - } + assert_eq!(bank.last_blockhash(), genesis_config.hash()); - for i in 
0..accounts.len() { - let account = &accounts[i]; - bank.store_account(&keypairs[i].pubkey(), account); - bank_with_success_txs.store_account(&keypairs[i].pubkey(), account); - } + let plenty_of_lamports = 264; + let too_few_lamports = 10; + // Initialize credit-debit and credit only accounts + let accounts = [ + AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 0, &Pubkey::default()), + AccountSharedData::new(plenty_of_lamports, 1, &Pubkey::default()), + // Transaction between these two accounts will fail + AccountSharedData::new(too_few_lamports, 0, &Pubkey::default()), + AccountSharedData::new(too_few_lamports, 1, &Pubkey::default()), + ]; - // Make builtin instruction loader rent exempt - let system_program_id = system_program::id(); - let mut system_program_account = bank.get_account(&system_program_id).unwrap(); - system_program_account.set_lamports( - bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()), - ); - bank.store_account(&system_program_id, &system_program_account); - bank_with_success_txs.store_account(&system_program_id, &system_program_account); + let keypairs = accounts.iter().map(|_| Keypair::new()).collect::>(); + { + // make sure rent and epoch change are such that we collect all lamports in accounts 4 & 5 + let mut account_copy = accounts[4].clone(); + let expected_rent = bank + .rent_collector() + .collect_from_existing_account(&keypairs[4].pubkey(), &mut account_copy); + assert_eq!(expected_rent.rent_amount, too_few_lamports); + assert_eq!(account_copy.lamports(), 0); + } - let t1 = system_transaction::transfer( - &keypairs[0], - &keypairs[1].pubkey(), - 1, - genesis_config.hash(), - ); - let t2 = system_transaction::transfer( - &keypairs[2], - &keypairs[3].pubkey(), - 1, - genesis_config.hash(), - ); - // the idea is this transaction will result in both accounts being drained of all 
lamports due to rent collection - let t3 = system_transaction::transfer( - &keypairs[4], - &keypairs[5].pubkey(), - 1, - genesis_config.hash(), - ); + for i in 0..accounts.len() { + let account = &accounts[i]; + bank.store_account(&keypairs[i].pubkey(), account); + bank_with_success_txs.store_account(&keypairs[i].pubkey(), account); + } - let txs = vec![t1.clone(), t2.clone(), t3]; - let res = bank.process_transactions(txs.iter()); + // Make builtin instruction loader rent exempt + let system_program_id = system_program::id(); + let mut system_program_account = bank.get_account(&system_program_id).unwrap(); + system_program_account.set_lamports( + bank.get_minimum_balance_for_rent_exemption(system_program_account.data().len()), + ); + bank.store_account(&system_program_id, &system_program_account); + bank_with_success_txs.store_account(&system_program_id, &system_program_account); - assert_eq!(res.len(), 3); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); - assert_eq!(res[2], Err(TransactionError::AccountNotFound)); + let t1 = system_transaction::transfer( + &keypairs[0], + &keypairs[1].pubkey(), + 1, + genesis_config.hash(), + ); + let t2 = system_transaction::transfer( + &keypairs[2], + &keypairs[3].pubkey(), + 1, + genesis_config.hash(), + ); + // the idea is this transaction will result in both accounts being drained of all lamports due to rent collection + let t3 = system_transaction::transfer( + &keypairs[4], + &keypairs[5].pubkey(), + 1, + genesis_config.hash(), + ); - bank.freeze(); + let txs = vec![t1.clone(), t2.clone(), t3]; + let res = bank.process_transactions(txs.iter()); - let rwlockguard_bank_hash = bank.hash.read().unwrap(); - let bank_hash = rwlockguard_bank_hash.as_ref(); + assert_eq!(res.len(), 3); + assert_eq!(res[0], Ok(())); + assert_eq!(res[1], Ok(())); + assert_eq!(res[2], Err(TransactionError::AccountNotFound)); - let txs = vec![t2, t1]; - let res = bank_with_success_txs.process_transactions(txs.iter()); + bank.freeze(); - 
assert_eq!(res.len(), 2); - assert_eq!(res[0], Ok(())); - assert_eq!(res[1], Ok(())); + let rwlockguard_bank_hash = bank.hash.read().unwrap(); + let bank_hash = rwlockguard_bank_hash.as_ref(); - bank_with_success_txs.freeze(); + let txs = vec![t2, t1]; + let res = bank_with_success_txs.process_transactions(txs.iter()); - let rwlockguard_bank_with_success_txs_hash = bank_with_success_txs.hash.read().unwrap(); - let bank_with_success_txs_hash = rwlockguard_bank_with_success_txs_hash.as_ref(); + assert_eq!(res.len(), 2); + assert_eq!(res[0], Ok(())); + assert_eq!(res[1], Ok(())); - assert_eq!(bank_with_success_txs_hash, bank_hash); - } + bank_with_success_txs.freeze(); + + let rwlockguard_bank_with_success_txs_hash = bank_with_success_txs.hash.read().unwrap(); + let bank_with_success_txs_hash = rwlockguard_bank_with_success_txs_hash.as_ref(); + + assert_eq!(bank_with_success_txs_hash, bank_hash); } fn store_accounts_for_rent_test( @@ -1648,10 +1651,9 @@ impl Bank { #[test_case(false; "disable rent fees collection")] fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config(1_000_000); for feature_id in FeatureSet::default().inactive { - if feature_id != solana_sdk::feature_set::set_exempt_rent_epoch_max::id() + if feature_id != solana_sdk::feature_set::skip_rent_rewrites::id() && (!should_collect_rent || feature_id != solana_sdk::feature_set::disable_rent_fees_collection::id()) { @@ -1733,11 +1735,9 @@ fn test_rent_eager_collect_rent_in_partition(should_collect_rent: bool) { bank.get_account(&rent_exempt_pubkey).unwrap().lamports(), large_lamports ); - // Once preserve_rent_epoch_for_rent_exempt_accounts is activated, - // rent_epoch of rent-exempt accounts will no longer advance. 
assert_eq!( bank.get_account(&rent_exempt_pubkey).unwrap().rent_epoch(), - 0 + RENT_EXEMPT_RENT_EPOCH ); assert_eq!( bank.slots_by_pubkey(&rent_due_pubkey, &ancestors), @@ -6477,43 +6477,45 @@ fn test_fuzz_instructions() { fn test_bank_hash_consistency() { solana_logger::setup(); - let mut genesis_config = GenesisConfig::new( - &[( - Pubkey::from([42; 32]), - AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()), - )], - &[], - ); + let account = AccountSharedData::new(1_000_000_000_000, 0, &system_program::id()); + assert_eq!(account.rent_epoch(), 0); + let mut genesis_config = GenesisConfig::new(&[(Pubkey::from([42; 32]), account)], &[]); genesis_config.creation_time = 0; genesis_config.cluster_type = ClusterType::MainnetBeta; genesis_config.rent.burn_percent = 100; + activate_feature( + &mut genesis_config, + solana_sdk::feature_set::set_exempt_rent_epoch_max::id(), + ); + let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); // Check a few slots, cross an epoch boundary assert_eq!(bank.get_slots_in_epoch(0), 32); loop { goto_end_of_slot(bank.clone()); + if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "trdzvRDTAXAqo1i2GX4JfK9ReixV1NYNG7DRaVq43Do", + "3VqF5pMe3XABLqzUaYw2UVXfAokMJgMkrdfvneFQkHbB", ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "2rdj8QEnDnBSyMv81rCmncss4UERACyXXB3pEvkep8eS", + "B8GsaBJ9aJrQcbhTTfgNVuV4uwb4v8nKT86HUjDLvNgk", ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "7g3ofXVQB3reFt9ki8zLA8S4w1GdmEWsWuWrwkPN3SSv" + "Eg9VRE3zUwarxWyHXhitX9wLkg1vfNeiVqVQxSif6qEC" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "4uX1AZFbqwjwWBACWbAW3V8rjbWH4N3ZRTbNysSLAzj2" + "5rLmK24zyxdeb8aLn5LDEnHLDQmxRd5gWZDVJGgsFX1c" ); break; } @@ -10987,16 +10989,12 @@ fn test_rent_state_list_len() { let sanitized_tx = SanitizedTransaction::try_from_legacy_transaction(tx).unwrap(); let mut error_counters = TransactionErrorMetrics::default(); let loaded_txs = load_accounts( - 
&bank.accounts().accounts_db, - &bank.ancestors, + &bank, &[sanitized_tx.clone()], &[(Ok(()), None, Some(0))], &mut error_counters, - &bank.rent_collector, - &bank.feature_set, &FeeStructure::default(), None, - RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), ); @@ -11599,57 +11597,53 @@ fn test_get_rent_paying_pubkeys() { #[test_case(true; "enable rent fees collection")] #[test_case(false; "disable rent fees collection")] fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { - for set_exempt_rent_epoch_max in [false, true] { - let GenesisConfigInfo { - mut genesis_config, .. - } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); - genesis_config.rent = Rent::default(); - if should_collect_rent { - genesis_config - .accounts - .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); - } + let GenesisConfigInfo { + mut genesis_config, .. + } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); + genesis_config.rent = Rent::default(); + if should_collect_rent { + genesis_config + .accounts + .remove(&solana_sdk::feature_set::disable_rent_fees_collection::id()); + } - let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); - let slot = bank.slot() + bank.slot_count_per_normal_epoch(); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + let slot = bank.slot() + bank.slot_count_per_normal_epoch(); + let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - // make another bank so that any reclaimed accounts from the previous bank do not impact - // this test - let slot = bank.slot() + bank.slot_count_per_normal_epoch(); - let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); + // make another bank so that any reclaimed accounts from the previous bank do not impact + // this test + let slot = bank.slot() + bank.slot_count_per_normal_epoch(); 
+ let bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - // Store an account into the bank that is rent-paying and has data - let data_size = 123; - let mut account = AccountSharedData::new(1, data_size, &Pubkey::default()); - let keypair = Keypair::new(); - bank.store_account(&keypair.pubkey(), &account); + // Store an account into the bank that is rent-paying and has data + let data_size = 123; + let mut account = AccountSharedData::new(1, data_size, &Pubkey::default()); + let keypair = Keypair::new(); + bank.store_account(&keypair.pubkey(), &account); - // Ensure if we collect rent from the account that it will be reclaimed - { - let info = bank.rent_collector.collect_from_existing_account( - &keypair.pubkey(), - &mut account, - set_exempt_rent_epoch_max, - ); - assert_eq!(info.account_data_len_reclaimed, data_size as u64); - } + // Ensure if we collect rent from the account that it will be reclaimed + { + let info = bank + .rent_collector + .collect_from_existing_account(&keypair.pubkey(), &mut account); + assert_eq!(info.account_data_len_reclaimed, data_size as u64); + } - // Collect rent for real - assert_eq!(should_collect_rent, bank.should_collect_rent()); - let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); - bank.collect_rent_eagerly(); - let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); + // Collect rent for real + assert_eq!(should_collect_rent, bank.should_collect_rent()); + let accounts_data_size_delta_before_collecting_rent = bank.load_accounts_data_size_delta(); + bank.collect_rent_eagerly(); + let accounts_data_size_delta_after_collecting_rent = bank.load_accounts_data_size_delta(); - let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent - - accounts_data_size_delta_before_collecting_rent; - assert!(!should_collect_rent || accounts_data_size_delta_delta < 0); - let reclaimed_data_size = 
accounts_data_size_delta_delta.saturating_neg() as usize; + let accounts_data_size_delta_delta = accounts_data_size_delta_after_collecting_rent + - accounts_data_size_delta_before_collecting_rent; + assert!(!should_collect_rent || accounts_data_size_delta_delta < 0); + let reclaimed_data_size = accounts_data_size_delta_delta.saturating_neg() as usize; - // Ensure the account is reclaimed by rent collection - assert!(!should_collect_rent || reclaimed_data_size == data_size); - } + // Ensure the account is reclaimed by rent collection + assert!(!should_collect_rent || reclaimed_data_size == data_size); } #[test] @@ -11859,87 +11853,6 @@ fn test_feature_hashes_per_tick() { assert_eq!(bank.hashes_per_tick, Some(UPDATED_HASHES_PER_TICK6)); } -#[test_case(true)] -#[test_case(false)] -fn test_stake_account_consistency_with_rent_epoch_max_feature( - rent_epoch_max_enabled_initially: bool, -) { - // this test can be removed once set_exempt_rent_epoch_max gets activated - solana_logger::setup(); - let (mut genesis_config, _mint_keypair) = create_genesis_config(100 * LAMPORTS_PER_SOL); - genesis_config.rent = Rent::default(); - let mut bank = Bank::new_for_tests(&genesis_config); - let expected_initial_rent_epoch = if rent_epoch_max_enabled_initially { - bank.activate_feature(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); - RENT_EXEMPT_RENT_EPOCH - } else { - Epoch::default() - }; - - let mut pubkey_bytes_early = [0u8; 32]; - pubkey_bytes_early[31] = 2; - let stake_id1 = Pubkey::from(pubkey_bytes_early); - let vote_id = solana_sdk::pubkey::new_rand(); - let stake_account1 = crate::stakes::tests::create_stake_account(12300000, &vote_id, &stake_id1); - - // set up accounts - bank.store_account_and_update_capitalization(&stake_id1, &stake_account1); - - // create banks at a few slots - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - 0 // manually created, so default is 0 - ); - let slot = 1; - let slots_per_epoch = 
bank.epoch_schedule().get_slots_in_epoch(0); - let mut bank = Bank::new_from_parent(Arc::new(bank), &Pubkey::default(), slot); - if !rent_epoch_max_enabled_initially { - bank.activate_feature(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); - } - let bank = Arc::new(bank); - - let slot = slots_per_epoch - 1; - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - // rent has been collected, so if rent epoch is max is activated, this will be max by now - expected_initial_rent_epoch - ); - let mut bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - - let last_slot_in_epoch = bank.epoch_schedule().get_last_slot_in_epoch(1); - let slot = last_slot_in_epoch - 2; - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - expected_initial_rent_epoch - ); - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - expected_initial_rent_epoch - ); - let slot = last_slot_in_epoch - 1; - bank = Arc::new(Bank::new_from_parent(bank, &Pubkey::default(), slot)); - assert_eq!( - bank.load_slow(&bank.ancestors, &stake_id1) - .unwrap() - .0 - .rent_epoch(), - RENT_EXEMPT_RENT_EPOCH - ); -} - #[test] fn test_calculate_fee_with_congestion_multiplier() { let lamports_scale: u64 = 5; @@ -13744,10 +13657,9 @@ fn test_filter_executable_program_accounts() { ); let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - let ancestors = vec![(0, 0)].into_iter().collect(); let owners = &[program1_pubkey, program2_pubkey]; - let programs = bank.filter_executable_program_accounts( - &ancestors, + let programs = TransactionBatchProcessor::::filter_executable_program_accounts( + &bank, &[sanitized_tx1, sanitized_tx2], &mut [(Ok(()), None, Some(0)), (Ok(()), None, Some(0))], owners, @@ -13839,11 +13751,10 @@ fn 
test_filter_executable_program_accounts_invalid_blockhash() { // Let's not register blockhash from tx2. This should cause the tx2 to fail let sanitized_tx2 = SanitizedTransaction::from_transaction_for_tests(tx2); - let ancestors = vec![(0, 0)].into_iter().collect(); let owners = &[program1_pubkey, program2_pubkey]; let mut lock_results = vec![(Ok(()), None, Some(0)), (Ok(()), None, None)]; - let programs = bank.filter_executable_program_accounts( - &ancestors, + let programs = TransactionBatchProcessor::::filter_executable_program_accounts( + &bank, &[sanitized_tx1, sanitized_tx2], &mut lock_results, owners, diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index d481bf1b43bda8..668062c8d31cce 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -221,8 +221,9 @@ impl BankForks { } pub fn insert(&mut self, mut bank: Bank) -> BankWithScheduler { - bank.check_program_modification_slot = - self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup; + if self.root.load(Ordering::Relaxed) < self.highest_slot_at_startup { + bank.check_program_modification_slot(); + } let bank = Arc::new(bank); let bank = if let Some(scheduler_pool) = &self.scheduler_pool { diff --git a/runtime/src/transaction_priority_details.rs b/runtime/src/compute_budget_details.rs similarity index 74% rename from runtime/src/transaction_priority_details.rs rename to runtime/src/compute_budget_details.rs index 284acb791a2e6a..69756d4567ff70 100644 --- a/runtime/src/transaction_priority_details.rs +++ b/runtime/src/compute_budget_details.rs @@ -8,34 +8,34 @@ use { }; #[derive(Clone, Debug, PartialEq, Eq)] -pub struct TransactionPriorityDetails { - pub priority: u64, +pub struct ComputeBudgetDetails { + pub compute_unit_price: u64, pub compute_unit_limit: u64, } -pub trait GetTransactionPriorityDetails { - fn get_transaction_priority_details( +pub trait GetComputeBudgetDetails { + fn get_compute_budget_details( &self, round_compute_unit_price_enabled: bool, - ) 
-> Option; + ) -> Option; fn process_compute_budget_instruction<'a>( instructions: impl Iterator, _round_compute_unit_price_enabled: bool, - ) -> Option { + ) -> Option { let compute_budget_limits = process_compute_budget_instructions(instructions).ok()?; - Some(TransactionPriorityDetails { - priority: compute_budget_limits.compute_unit_price, + Some(ComputeBudgetDetails { + compute_unit_price: compute_budget_limits.compute_unit_price, compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), }) } } -impl GetTransactionPriorityDetails for SanitizedVersionedTransaction { - fn get_transaction_priority_details( +impl GetComputeBudgetDetails for SanitizedVersionedTransaction { + fn get_compute_budget_details( &self, round_compute_unit_price_enabled: bool, - ) -> Option { + ) -> Option { Self::process_compute_budget_instruction( self.get_message().program_instructions_iter(), round_compute_unit_price_enabled, @@ -43,11 +43,11 @@ impl GetTransactionPriorityDetails for SanitizedVersionedTransaction { } } -impl GetTransactionPriorityDetails for SanitizedTransaction { - fn get_transaction_priority_details( +impl GetComputeBudgetDetails for SanitizedTransaction { + fn get_compute_budget_details( &self, round_compute_unit_price_enabled: bool, - ) -> Option { + ) -> Option { Self::process_compute_budget_instruction( self.message().program_instructions_iter(), round_compute_unit_price_enabled, @@ -70,7 +70,7 @@ mod tests { }; #[test] - fn test_get_priority_with_valid_request_heap_frame_tx() { + fn test_get_compute_budget_details_with_valid_request_heap_frame_tx() { let keypair = Keypair::new(); let transaction = Transaction::new_unsigned(Message::new( &[ @@ -85,9 +85,9 @@ mod tests { let sanitized_versioned_transaction = SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + 
sanitized_versioned_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, @@ -98,9 +98,9 @@ mod tests { let sanitized_transaction = SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, @@ -109,7 +109,7 @@ mod tests { } #[test] - fn test_get_priority_with_valid_set_compute_units_limit() { + fn test_get_compute_budget_details_with_valid_set_compute_units_limit() { let requested_cu = 101u32; let keypair = Keypair::new(); let transaction = Transaction::new_unsigned(Message::new( @@ -125,9 +125,9 @@ mod tests { let sanitized_versioned_transaction = SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_versioned_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: requested_cu as u64, }) ); @@ -136,16 +136,16 @@ mod tests { let sanitized_transaction = SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: 0, + sanitized_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: 0, compute_unit_limit: requested_cu as u64, }) ); } #[test] - fn test_get_priority_with_valid_set_compute_unit_price() { + fn 
test_get_compute_budget_details_with_valid_set_compute_unit_price() { let requested_price = 1_000; let keypair = Keypair::new(); let transaction = Transaction::new_unsigned(Message::new( @@ -161,9 +161,9 @@ mod tests { let sanitized_versioned_transaction = SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); assert_eq!( - sanitized_versioned_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: requested_price, + sanitized_versioned_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: requested_price, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, @@ -174,9 +174,9 @@ mod tests { let sanitized_transaction = SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); assert_eq!( - sanitized_transaction.get_transaction_priority_details(false), - Some(TransactionPriorityDetails { - priority: requested_price, + sanitized_transaction.get_compute_budget_details(false), + Some(ComputeBudgetDetails { + compute_unit_price: requested_price, compute_unit_limit: solana_program_runtime::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64, diff --git a/runtime/src/epoch_rewards_hasher.rs b/runtime/src/epoch_rewards_hasher.rs index 120bb0c2c98500..b594b05a5cfe3b 100644 --- a/runtime/src/epoch_rewards_hasher.rs +++ b/runtime/src/epoch_rewards_hasher.rs @@ -9,7 +9,7 @@ pub(crate) fn hash_rewards_into_partitions( num_partitions: usize, ) -> Vec { let hasher = EpochRewardsHasher::new(num_partitions, parent_blockhash); - let mut rewards = vec![vec![]; num_partitions]; + let mut result = vec![vec![]; num_partitions]; for reward in stake_rewards { // clone here so the hasher's state is re-used on each call to `hash_address_to_partition`. 
@@ -18,9 +18,9 @@ pub(crate) fn hash_rewards_into_partitions( let partition_index = hasher .clone() .hash_address_to_partition(&reward.stake_pubkey); - rewards[partition_index].push(reward); + result[partition_index].push(reward); } - rewards + result } #[cfg(test)] diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 0612ac0cca74d2..fac4169301004d 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -11,6 +11,7 @@ pub mod bank_forks; pub mod bank_utils; pub mod builtins; pub mod commitment; +pub mod compute_budget_details; mod epoch_rewards_hasher; pub mod epoch_stakes; pub mod genesis_utils; @@ -21,7 +22,6 @@ pub mod non_circulating_supply; pub mod prioritization_fee; pub mod prioritization_fee_cache; pub mod root_bank_cache; -pub mod runtime_config; pub mod serde_snapshot; pub mod snapshot_archive_info; pub mod snapshot_bank_utils; @@ -36,9 +36,7 @@ pub mod stake_weighted_timestamp; pub mod stakes; pub mod static_ids; pub mod status_cache; -pub mod svm; pub mod transaction_batch; -pub mod transaction_priority_details; #[macro_use] extern crate solana_metrics; diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index bb5f7632c97e01..90cc66b981ce3a 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -124,9 +124,9 @@ pub enum PrioritizationFeeError { // minimum fees. FailGetTransactionAccountLocks, - // Not able to read priority details, including compute-unit price, from transaction. + // Not able to read compute budget details, including compute-unit price, from transaction. // Compute-unit price is required to update block minimum fees. 
- FailGetTransactionPriorityDetails, + FailGetComputeBudgetDetails, // Block is already finalized, trying to finalize it again is usually unexpected BlockIsAlreadyFinalized, diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index ece749387a9147..839519020ff42f 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -1,8 +1,5 @@ use { - crate::{ - bank::Bank, prioritization_fee::*, - transaction_priority_details::GetTransactionPriorityDetails, - }, + crate::{bank::Bank, compute_budget_details::GetComputeBudgetDetails, prioritization_fee::*}, crossbeam_channel::{unbounded, Receiver, Sender}, dashmap::DashMap, log::*, @@ -208,8 +205,8 @@ impl PrioritizationFeeCache { } } - /// Update with a list of non-vote transactions' tx_priority_details and tx_account_locks; Only - /// transactions have both valid priority_detail and account_locks will be used to update + /// Update with a list of non-vote transactions' compute_budget_details and account_locks; Only + /// transactions that have both valid compute_budget_details and account_locks will be used to update /// fee_cache asynchronously. 
pub fn update<'a>(&self, bank: &Bank, txs: impl Iterator) { let (_, send_updates_time) = measure!( @@ -222,19 +219,19 @@ impl PrioritizationFeeCache { } let round_compute_unit_price_enabled = false; // TODO: bank.feture_set.is_active(round_compute_unit_price) - let priority_details = sanitized_transaction - .get_transaction_priority_details(round_compute_unit_price_enabled); + let compute_budget_details = sanitized_transaction + .get_compute_budget_details(round_compute_unit_price_enabled); let account_locks = sanitized_transaction .get_account_locks(bank.get_transaction_account_lock_limit()); - if priority_details.is_none() || account_locks.is_err() { + if compute_budget_details.is_none() || account_locks.is_err() { continue; } - let priority_details = priority_details.unwrap(); + let compute_budget_details = compute_budget_details.unwrap(); // filter out any transaction that requests zero compute_unit_limit // since its priority fee amount is not instructive - if priority_details.compute_unit_limit == 0 { + if compute_budget_details.compute_unit_limit == 0 { continue; } @@ -251,7 +248,7 @@ impl PrioritizationFeeCache { .send(CacheServiceUpdate::TransactionUpdate { slot: bank.slot(), bank_id: bank.bank_id(), - transaction_fee: priority_details.priority, + transaction_fee: compute_budget_details.compute_unit_price, writable_accounts, }) .unwrap_or_else(|err| { diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index ddcaef833b8275..4b066976d49048 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -3,7 +3,6 @@ use { bank::{Bank, BankFieldsToDeserialize, BankRc}, builtins::BuiltinPrototype, epoch_stakes::EpochStakes, - runtime_config::RuntimeConfig, serde_snapshot::storage::SerializableAccountStorageEntry, snapshot_utils::{ self, SnapshotError, StorageAndNextAppendVecId, BANK_SNAPSHOT_PRE_FILENAME_EXTENSION, @@ -26,7 +25,6 @@ use { accounts_update_notifier_interface::AccountsUpdateNotifier, 
blockhash_queue::BlockhashQueue, epoch_accounts_hash::EpochAccountsHash, - rent_collector::RentCollector, }, solana_measure::measure::Measure, solana_sdk::{ @@ -39,7 +37,9 @@ use { hash::Hash, inflation::Inflation, pubkey::Pubkey, + rent_collector::RentCollector, }, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::{HashMap, HashSet}, io::{self, BufReader, BufWriter, Read, Write}, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index f9d45b372f5fc4..510069c92662fc 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -23,7 +23,6 @@ mod serde_snapshot_tests { accounts_hash::AccountsHash, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, - rent_collector::RentCollector, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, @@ -32,6 +31,7 @@ mod serde_snapshot_tests { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, + rent_collector::RentCollector, }, std::{ io::{BufReader, Cursor, Read, Write}, diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 5494eb1beb716c..dfeda8e59e0fe1 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -2,7 +2,6 @@ use { crate::{ bank::{Bank, BankFieldsToDeserialize, BankSlotDelta}, builtins::BuiltinPrototype, - runtime_config::RuntimeConfig, serde_snapshot::{ bank_from_streams, bank_to_stream, fields_from_streams, BankIncrementalSnapshotPersistence, SerdeStyle, @@ -46,6 +45,7 @@ use { pubkey::Pubkey, slot_history::{Check, SlotHistory}, }, + solana_svm::runtime_config::RuntimeConfig, std::{ collections::HashSet, fs, diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index 99af3ebbe6ee2a..55a4b13744b4f4 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs @@ -11,9 +11,11 @@ use { accounts_db::{AccountStorageEntry, AccountsDb}, accounts_hash::{AccountsHash, 
AccountsHashKind}, epoch_accounts_hash::EpochAccountsHash, - rent_collector::RentCollector, }, - solana_sdk::{clock::Slot, feature_set, sysvar::epoch_schedule::EpochSchedule}, + solana_sdk::{ + clock::Slot, feature_set, rent_collector::RentCollector, + sysvar::epoch_schedule::EpochSchedule, + }, std::{ path::{Path, PathBuf}, sync::Arc, diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index ff0afc1e779b0a..1bd9c4d254958d 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -23,7 +23,7 @@ use { append_vec::AppendVec, hardened_unpack::{self, ParallelSelector, UnpackError}, shared_buffer_reader::{SharedBuffer, SharedBufferReader}, - utils::{delete_contents_of_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, + utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR}, }, solana_measure::{measure, measure::Measure}, solana_sdk::{clock::Slot, hash::Hash}, @@ -36,7 +36,7 @@ use { path::{Path, PathBuf}, process::ExitStatus, str::FromStr, - sync::{Arc, Mutex}, + sync::Arc, thread::{Builder, JoinHandle}, }, tar::{self, Archive}, @@ -529,82 +529,6 @@ pub enum GetSnapshotAccountsHardLinkDirError { }, } -/// Moves and asynchronously deletes the contents of a directory to avoid blocking on it. -/// The directory is re-created after the move, and should now be empty. -pub fn move_and_async_delete_path_contents(path: impl AsRef) { - move_and_async_delete_path(&path); - // The following could fail if the rename failed. - // If that happens, the directory should be left as is. - // So we ignore errors here. - _ = std::fs::create_dir(path); -} - -/// Delete directories/files asynchronously to avoid blocking on it. -/// First, in sync context, check if the original path exists, if it -/// does, rename the original path to *_to_be_deleted. -/// If there's an in-progress deleting thread for this path, return. -/// Then spawn a thread to delete the renamed path. 
-pub fn move_and_async_delete_path(path: impl AsRef) { - lazy_static! { - static ref IN_PROGRESS_DELETES: Mutex> = Mutex::new(HashSet::new()); - }; - - // Grab the mutex so no new async delete threads can be spawned for this path. - let mut lock = IN_PROGRESS_DELETES.lock().unwrap(); - - // If the path does not exist, there's nothing to delete. - if !path.as_ref().exists() { - return; - } - - // If the original path (`pathbuf` here) is already being deleted, - // then the path should not be moved and deleted again. - if lock.contains(path.as_ref()) { - return; - } - - let mut path_delete = path.as_ref().to_path_buf(); - path_delete.set_file_name(format!( - "{}{}", - path_delete.file_name().unwrap().to_str().unwrap(), - "_to_be_deleted" - )); - if let Err(err) = fs::rename(&path, &path_delete) { - warn!( - "Cannot async delete, retrying in sync mode: failed to rename '{}' to '{}': {err}", - path.as_ref().display(), - path_delete.display(), - ); - // Although the delete here is synchronous, we want to prevent another thread - // from moving & deleting this directory via `move_and_async_delete_path`. - lock.insert(path.as_ref().to_path_buf()); - drop(lock); // unlock before doing sync delete - - delete_contents_of_path(&path); - IN_PROGRESS_DELETES.lock().unwrap().remove(path.as_ref()); - return; - } - - lock.insert(path_delete.clone()); - drop(lock); - Builder::new() - .name("solDeletePath".to_string()) - .spawn(move || { - trace!("background deleting {}...", path_delete.display()); - let (result, measure_delete) = measure!(fs::remove_dir_all(&path_delete)); - if let Err(err) = result { - panic!("Failed to async delete '{}': {err}", path_delete.display()); - } - trace!( - "background deleting {}... 
Done, and{measure_delete}", - path_delete.display() - ); - - IN_PROGRESS_DELETES.lock().unwrap().remove(&path_delete); - }) - .expect("spawn background delete thread"); -} - /// The account snapshot directories under /snapshot/ contain account files hardlinked /// from /run taken at snapshot time. They are referenced by the symlinks from the /// bank snapshot dir snapshot//accounts_hardlinks/. We observed that sometimes the bank snapshot dir diff --git a/runtime/src/svm/mod.rs b/runtime/src/svm/mod.rs deleted file mode 100644 index d026b8f3abb26e..00000000000000 --- a/runtime/src/svm/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod account_loader; -pub mod account_rent_state; -pub mod transaction_account_state_info; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7897a24d1a1be7..624eb0ea639014 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.75.0" +channel = "1.76.0" diff --git a/sdk/docker-solana/build.sh b/sdk/docker-solana/build.sh index 77160d73edbc38..f1c8ee265d6d56 100755 --- a/sdk/docker-solana/build.sh +++ b/sdk/docker-solana/build.sh @@ -20,8 +20,7 @@ fi cd "$(dirname "$0")" rm -rf usr/ -../../ci/docker-run.sh "$rust_stable_docker_image" \ - scripts/cargo-install-all.sh sdk/docker-solana/usr +../../ci/docker-run-default-image.sh scripts/cargo-install-all.sh sdk/docker-solana/usr cp -f ../../scripts/run.sh usr/bin/solana-run.sh cp -f ../../fetch-spl.sh usr/bin/ diff --git a/sdk/program/src/epoch_rewards_partition_data.rs b/sdk/program/src/epoch_rewards_partition_data.rs deleted file mode 100644 index 2ff511af8fb72b..00000000000000 --- a/sdk/program/src/epoch_rewards_partition_data.rs +++ /dev/null @@ -1,39 +0,0 @@ -use { - crate::{hash::Hash, pubkey::Pubkey}, - serde_derive::{Deserialize, Serialize}, -}; - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub enum EpochRewardsPartitionDataVersion { - V0(PartitionData), -} - -impl EpochRewardsPartitionDataVersion { - pub fn 
get_hasher_kind(&self) -> HasherKind { - match self { - EpochRewardsPartitionDataVersion::V0(_) => HasherKind::Sip13, - } - } -} - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub enum HasherKind { - Sip13, -} - -/// Data about a rewards partitions for an epoch -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] -pub struct PartitionData { - /// Number of partitions used for epoch rewards this epoch - pub num_partitions: usize, - /// Blockhash of the last block of the previous epoch, used to create EpochRewardsHasher - pub parent_blockhash: Hash, -} - -pub fn get_epoch_rewards_partition_data_address(epoch: u64) -> Pubkey { - let (address, _bump_seed) = Pubkey::find_program_address( - &[b"EpochRewards", b"PartitionData", &epoch.to_le_bytes()], - &crate::sysvar::id(), - ); - address -} diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index db26af5ad04fde..572fa42c69796d 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -28,9 +28,8 @@ use { /// an error be consistent across software versions. For example, it is /// dangerous to include error strings from 3rd party crates because they could /// change at any time and changes to them are difficult to detect. -#[derive( - Serialize, Deserialize, Debug, Error, PartialEq, Eq, Clone, AbiExample, AbiEnumVisitor, -)] +#[cfg_attr(not(target_os = "solana"), derive(AbiExample, AbiEnumVisitor))] +#[derive(Serialize, Deserialize, Debug, Error, PartialEq, Eq, Clone)] pub enum InstructionError { /// Deprecated! Use CustomError instead! 
/// The program instruction returned an error diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 016585d403ae2a..54de9d817205a8 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -491,7 +491,6 @@ pub mod ed25519_program; pub mod entrypoint; pub mod entrypoint_deprecated; pub mod epoch_rewards; -pub mod epoch_rewards_partition_data; pub mod epoch_schedule; pub mod feature; pub mod fee_calculator; diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs index 0840ee16b901d7..9881ef345f0159 100644 --- a/sdk/program/src/program_error.rs +++ b/sdk/program/src/program_error.rs @@ -63,6 +63,10 @@ pub enum ProgramError { InvalidAccountOwner, #[error("Program arithmetic overflowed")] ArithmeticOverflow, + #[error("Account is immutable")] + Immutable, + #[error("Incorrect authority provided")] + IncorrectAuthority, } pub trait PrintProgramError { @@ -113,6 +117,8 @@ impl PrintProgramError for ProgramError { } Self::InvalidAccountOwner => msg!("Error: InvalidAccountOwner"), Self::ArithmeticOverflow => msg!("Error: ArithmeticOverflow"), + Self::Immutable => msg!("Error: Immutable"), + Self::IncorrectAuthority => msg!("Error: IncorrectAuthority"), } } } @@ -149,6 +155,8 @@ pub const MAX_INSTRUCTION_TRACE_LENGTH_EXCEEDED: u64 = to_builtin!(21); pub const BUILTIN_PROGRAMS_MUST_CONSUME_COMPUTE_UNITS: u64 = to_builtin!(22); pub const INVALID_ACCOUNT_OWNER: u64 = to_builtin!(23); pub const ARITHMETIC_OVERFLOW: u64 = to_builtin!(24); +pub const IMMUTABLE: u64 = to_builtin!(25); +pub const INCORRECT_AUTHORITY: u64 = to_builtin!(26); // Warning: Any new program errors added here must also be: // - Added to the below conversions // - Added as an equivalent to InstructionError @@ -187,6 +195,8 @@ impl From for u64 { } ProgramError::InvalidAccountOwner => INVALID_ACCOUNT_OWNER, ProgramError::ArithmeticOverflow => ARITHMETIC_OVERFLOW, + ProgramError::Immutable => IMMUTABLE, + ProgramError::IncorrectAuthority => INCORRECT_AUTHORITY, 
ProgramError::Custom(error) => { if error == 0 { CUSTOM_ZERO @@ -227,6 +237,8 @@ impl From for ProgramError { } INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, + IMMUTABLE => Self::Immutable, + INCORRECT_AUTHORITY => Self::IncorrectAuthority, _ => Self::Custom(error as u32), } } @@ -267,6 +279,8 @@ impl TryFrom for ProgramError { } Self::Error::InvalidAccountOwner => Ok(Self::InvalidAccountOwner), Self::Error::ArithmeticOverflow => Ok(Self::ArithmeticOverflow), + Self::Error::Immutable => Ok(Self::Immutable), + Self::Error::IncorrectAuthority => Ok(Self::IncorrectAuthority), _ => Err(error), } } @@ -305,6 +319,8 @@ where } INVALID_ACCOUNT_OWNER => Self::InvalidAccountOwner, ARITHMETIC_OVERFLOW => Self::ArithmeticOverflow, + IMMUTABLE => Self::Immutable, + INCORRECT_AUTHORITY => Self::IncorrectAuthority, _ => { // A valid custom error has no bits set in the upper 32 if error >> BUILTIN_BIT_SHIFT == 0 { diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index e64d6ddc57d0fd..7c6b643884e449 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -48,8 +48,8 @@ pub use solana_program::{ account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, borsh, borsh0_10, borsh0_9, borsh1, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default, custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, - declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_rewards_partition_data, - epoch_schedule, fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, + declare_sysvar_id, decode_error, ed25519_program, epoch_rewards, epoch_schedule, + fee_calculator, impl_sysvar_get, incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, loader_v4, loader_v4_instruction, message, msg, native_token, nonce, poseidon, program, program_error, program_memory, program_option, program_pack, rent, sanitize, sdk_ids, 
secp256k1_program, secp256k1_recover, serde_varint, @@ -84,6 +84,7 @@ pub mod log; pub mod native_loader; pub mod net; pub mod nonce_account; +pub mod nonce_info; pub mod offchain_message; pub mod packet; pub mod poh_config; @@ -92,6 +93,9 @@ pub mod program_utils; pub mod pubkey; pub mod quic; pub mod recent_blockhashes_account; +pub mod rent_collector; +pub mod rent_debits; +pub mod reward_info; pub mod reward_type; pub mod rpc_port; pub mod secp256k1_instruction; diff --git a/accounts-db/src/nonce_info.rs b/sdk/src/nonce_info.rs similarity index 96% rename from accounts-db/src/nonce_info.rs rename to sdk/src/nonce_info.rs index 8a6d3a40fc7ecc..585f9fa2e3a687 100644 --- a/accounts-db/src/nonce_info.rs +++ b/sdk/src/nonce_info.rs @@ -1,13 +1,12 @@ -use { - crate::rent_debits::RentDebits, - solana_sdk::{ - account::{AccountSharedData, ReadableAccount, WritableAccount}, - message::SanitizedMessage, - nonce_account, - pubkey::Pubkey, - transaction::{self, TransactionError}, - transaction_context::TransactionAccount, - }, +#![cfg(feature = "full")] +use crate::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + message::SanitizedMessage, + nonce_account, + pubkey::Pubkey, + rent_debits::RentDebits, + transaction::{self, TransactionError}, + transaction_context::TransactionAccount, }; pub trait NonceInfo { @@ -120,7 +119,7 @@ impl NonceInfo for NonceFull { mod tests { use { super::*, - solana_sdk::{ + crate::{ hash::Hash, instruction::Instruction, message::Message, diff --git a/sdk/src/rent_collector.rs b/sdk/src/rent_collector.rs new file mode 100644 index 00000000000000..1de6ce19950dbd --- /dev/null +++ b/sdk/src/rent_collector.rs @@ -0,0 +1,477 @@ +#![cfg(feature = "full")] + +//! 
calculate and collect rent from Accounts +use solana_sdk::{ + account::{AccountSharedData, ReadableAccount, WritableAccount}, + clock::Epoch, + epoch_schedule::EpochSchedule, + genesis_config::GenesisConfig, + incinerator, + pubkey::Pubkey, + rent::{Rent, RentDue}, +}; + +#[derive(Serialize, Deserialize, Clone, PartialEq, Debug, AbiExample)] +pub struct RentCollector { + pub epoch: Epoch, + pub epoch_schedule: EpochSchedule, + pub slots_per_year: f64, + pub rent: Rent, +} + +impl Default for RentCollector { + fn default() -> Self { + Self { + epoch: Epoch::default(), + epoch_schedule: EpochSchedule::default(), + // derive default value using GenesisConfig::default() + slots_per_year: GenesisConfig::default().slots_per_year(), + rent: Rent::default(), + } + } +} + +/// When rent is collected from an exempt account, rent_epoch is set to this +/// value. The idea is to have a fixed, consistent value for rent_epoch for all accounts that do not collect rent. +/// This enables us to get rid of the field completely. 
+pub const RENT_EXEMPT_RENT_EPOCH: Epoch = Epoch::MAX; + +/// when rent is collected for this account, this is the action to apply to the account +#[derive(Debug)] +enum RentResult { + /// this account will never have rent collected from it + Exempt, + /// maybe we collect rent later, but not now + NoRentCollectionNow, + /// collect rent + CollectRent { + new_rent_epoch: Epoch, + rent_due: u64, // lamports, could be 0 + }, +} + +impl RentCollector { + pub fn new( + epoch: Epoch, + epoch_schedule: EpochSchedule, + slots_per_year: f64, + rent: Rent, + ) -> Self { + Self { + epoch, + epoch_schedule, + slots_per_year, + rent, + } + } + + pub fn clone_with_epoch(&self, epoch: Epoch) -> Self { + Self { + epoch, + ..self.clone() + } + } + + /// true if it is easy to determine this account should consider having rent collected from it + pub fn should_collect_rent(&self, address: &Pubkey, account: &impl ReadableAccount) -> bool { + !(account.executable() // executable accounts must be rent-exempt balance + || *address == incinerator::id()) + } + + /// given an account that 'should_collect_rent' + /// returns (amount rent due, is_exempt_from_rent) + pub fn get_rent_due(&self, account: &impl ReadableAccount) -> RentDue { + if self + .rent + .is_exempt(account.lamports(), account.data().len()) + { + RentDue::Exempt + } else { + let account_rent_epoch = account.rent_epoch(); + let slots_elapsed: u64 = (account_rent_epoch..=self.epoch) + .map(|epoch| { + self.epoch_schedule + .get_slots_in_epoch(epoch.saturating_add(1)) + }) + .sum(); + + // avoid infinite rent in rust 1.45 + let years_elapsed = if self.slots_per_year != 0.0 { + slots_elapsed as f64 / self.slots_per_year + } else { + 0.0 + }; + + // we know this account is not exempt + let due = self.rent.due_amount(account.data().len(), years_elapsed); + RentDue::Paying(due) + } + } + + // Updates the account's lamports and status, and returns the amount of rent collected, if any. + // This is NOT thread safe at some level. 
If we try to collect from the same account in + // parallel, we may collect twice. + #[must_use = "add to Bank::collected_rent"] + pub fn collect_from_existing_account( + &self, + address: &Pubkey, + account: &mut AccountSharedData, + ) -> CollectedInfo { + match self.calculate_rent_result(address, account) { + RentResult::Exempt => { + account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + CollectedInfo::default() + } + RentResult::NoRentCollectionNow => CollectedInfo::default(), + RentResult::CollectRent { + new_rent_epoch, + rent_due, + } => match account.lamports().checked_sub(rent_due) { + None | Some(0) => { + let account = std::mem::take(account); + CollectedInfo { + rent_amount: account.lamports(), + account_data_len_reclaimed: account.data().len() as u64, + } + } + Some(lamports) => { + account.set_lamports(lamports); + account.set_rent_epoch(new_rent_epoch); + CollectedInfo { + rent_amount: rent_due, + account_data_len_reclaimed: 0u64, + } + } + }, + } + } + + /// determine what should happen to collect rent from this account + #[must_use] + fn calculate_rent_result( + &self, + address: &Pubkey, + account: &impl ReadableAccount, + ) -> RentResult { + if account.rent_epoch() == RENT_EXEMPT_RENT_EPOCH || account.rent_epoch() > self.epoch { + // potentially rent paying account (or known and already marked exempt) + // Maybe collect rent later, leave account alone for now. + return RentResult::NoRentCollectionNow; + } + if !self.should_collect_rent(address, account) { + // easy to determine this account should not consider having rent collected from it + return RentResult::Exempt; + } + match self.get_rent_due(account) { + // account will not have rent collected ever + RentDue::Exempt => RentResult::Exempt, + // potentially rent paying account + // Maybe collect rent later, leave account alone for now. + RentDue::Paying(0) => RentResult::NoRentCollectionNow, + // Rent is collected for next epoch. 
+ RentDue::Paying(rent_due) => RentResult::CollectRent { + new_rent_epoch: self.epoch.saturating_add(1), + rent_due, + }, + } + } +} + +/// Information computed during rent collection +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)] +pub struct CollectedInfo { + /// Amount of rent collected from account + pub rent_amount: u64, + /// Size of data reclaimed from account (happens when account's lamports go to zero) + pub account_data_len_reclaimed: u64, +} + +impl std::ops::Add for CollectedInfo { + type Output = Self; + fn add(self, other: Self) -> Self { + Self { + rent_amount: self.rent_amount.saturating_add(other.rent_amount), + account_data_len_reclaimed: self + .account_data_len_reclaimed + .saturating_add(other.account_data_len_reclaimed), + } + } +} + +impl std::ops::AddAssign for CollectedInfo { + #![allow(clippy::arithmetic_side_effects)] + fn add_assign(&mut self, other: Self) { + *self = *self + other; + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + assert_matches::assert_matches, + solana_sdk::{account::Account, sysvar}, + }; + + fn default_rent_collector_clone_with_epoch(epoch: Epoch) -> RentCollector { + RentCollector::default().clone_with_epoch(epoch) + } + + impl RentCollector { + #[must_use = "add to Bank::collected_rent"] + fn collect_from_created_account( + &self, + address: &Pubkey, + account: &mut AccountSharedData, + ) -> CollectedInfo { + // initialize rent_epoch as created at this epoch + account.set_rent_epoch(self.epoch); + self.collect_from_existing_account(address, account) + } + } + + #[test] + fn test_calculate_rent_result() { + let mut rent_collector = RentCollector::default(); + + let mut account = AccountSharedData::default(); + assert_matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::NoRentCollectionNow + ); + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + 
CollectedInfo::default() + ); + assert_eq!(account_clone, account); + } + + account.set_executable(true); + assert_matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::Exempt + ); + { + let mut account_clone = account.clone(); + let mut account_expected = account.clone(); + account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() + ); + assert_eq!(account_clone, account_expected); + } + + account.set_executable(false); + assert_matches!( + rent_collector.calculate_rent_result(&incinerator::id(), &account), + RentResult::Exempt + ); + { + let mut account_clone = account.clone(); + let mut account_expected = account.clone(); + account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + rent_collector + .collect_from_existing_account(&incinerator::id(), &mut account_clone), + CollectedInfo::default() + ); + assert_eq!(account_clone, account_expected); + } + + // try a few combinations of rent collector rent epoch and collecting rent + for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { + rent_collector.epoch = rent_epoch; + account.set_lamports(10); + account.set_rent_epoch(1); + let new_rent_epoch_expected = rent_collector.epoch + 1; + assert!( + matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + ), + "{:?}", + rent_collector.calculate_rent_result(&Pubkey::default(), &account) + ); + + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo { + rent_amount: rent_due_expected, + account_data_len_reclaimed: 0 + } + ); + let mut account_expected = account.clone(); + account_expected.set_lamports(account.lamports() 
- rent_due_expected); + account_expected.set_rent_epoch(new_rent_epoch_expected); + assert_eq!(account_clone, account_expected); + } + } + + // enough lamports to make us exempt + account.set_lamports(1_000_000); + let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); + assert!(matches!(result, RentResult::Exempt), "{result:?}",); + { + let mut account_clone = account.clone(); + let mut account_expected = account.clone(); + account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() + ); + assert_eq!(account_clone, account_expected); + } + + // enough lamports to make us exempt + // but, our rent_epoch is set in the future, so we can't know if we are exempt yet or not. + // We don't calculate rent amount vs data if the rent_epoch is already in the future. + account.set_rent_epoch(1_000_000); + assert_matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::NoRentCollectionNow + ); + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector + .collect_from_existing_account(&Pubkey::default(), &mut account_clone), + CollectedInfo::default() + ); + assert_eq!(account_clone, account); + } + } + + #[test] + fn test_collect_from_account_created_and_existing() { + let old_lamports = 1000; + let old_epoch = 1; + let new_epoch = 2; + + let (mut created_account, mut existing_account) = { + let account = AccountSharedData::from(Account { + lamports: old_lamports, + rent_epoch: old_epoch, + ..Account::default() + }); + + (account.clone(), account) + }; + + let rent_collector = default_rent_collector_clone_with_epoch(new_epoch); + + // collect rent on a newly-created account + let collected = rent_collector + .collect_from_created_account(&solana_sdk::pubkey::new_rand(), &mut created_account); + assert!(created_account.lamports() < old_lamports); + assert_eq!( + 
created_account.lamports() + collected.rent_amount, + old_lamports + ); + assert_ne!(created_account.rent_epoch(), old_epoch); + assert_eq!(collected.account_data_len_reclaimed, 0); + + // collect rent on a already-existing account + let collected = rent_collector + .collect_from_existing_account(&solana_sdk::pubkey::new_rand(), &mut existing_account); + assert!(existing_account.lamports() < old_lamports); + assert_eq!( + existing_account.lamports() + collected.rent_amount, + old_lamports + ); + assert_ne!(existing_account.rent_epoch(), old_epoch); + assert_eq!(collected.account_data_len_reclaimed, 0); + + // newly created account should be collected for less rent; thus more remaining balance + assert!(created_account.lamports() > existing_account.lamports()); + assert_eq!(created_account.rent_epoch(), existing_account.rent_epoch()); + } + + #[test] + fn test_rent_exempt_temporal_escape() { + for pass in 0..2 { + let mut account = AccountSharedData::default(); + let epoch = 3; + let huge_lamports = 123_456_789_012; + let tiny_lamports = 789_012; + let pubkey = solana_sdk::pubkey::new_rand(); + + assert_eq!(account.rent_epoch(), 0); + + // create a tested rent collector + let rent_collector = default_rent_collector_clone_with_epoch(epoch); + + if pass == 0 { + account.set_lamports(huge_lamports); + // first mark account as being collected while being rent-exempt + let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account); + assert_eq!(account.lamports(), huge_lamports); + assert_eq!(collected, CollectedInfo::default()); + continue; + } + + // decrease the balance not to be rent-exempt + // In a real validator, it is not legal to reduce an account's lamports such that the account becomes rent paying. + // So, pass == 0 above tests the case of rent that is exempt. pass == 1 tests the case where we are rent paying. + account.set_lamports(tiny_lamports); + + // ... 
and trigger another rent collection on the same epoch and check that rent is working + let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account); + assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); + assert_ne!(collected, CollectedInfo::default()); + } + } + + #[test] + fn test_rent_exempt_sysvar() { + let tiny_lamports = 1; + let mut account = AccountSharedData::default(); + account.set_owner(sysvar::id()); + account.set_lamports(tiny_lamports); + + let pubkey = solana_sdk::pubkey::new_rand(); + + assert_eq!(account.rent_epoch(), 0); + + let epoch = 3; + let rent_collector = default_rent_collector_clone_with_epoch(epoch); + + let collected = rent_collector.collect_from_existing_account(&pubkey, &mut account); + assert_eq!(account.lamports(), 0); + assert_eq!(collected.rent_amount, 1); + } + + /// Ensure that when an account is "rent collected" away, its data len is returned. + #[test] + fn test_collect_cleans_up_account() { + solana_logger::setup(); + let account_lamports = 1; // must be *below* rent amount + let account_data_len = 567; + let account_rent_epoch = 11; + let mut account = AccountSharedData::from(Account { + lamports: account_lamports, // <-- must be below rent-exempt amount + data: vec![u8::default(); account_data_len], + rent_epoch: account_rent_epoch, + ..Account::default() + }); + let rent_collector = default_rent_collector_clone_with_epoch(account_rent_epoch + 1); + + let collected = + rent_collector.collect_from_existing_account(&Pubkey::new_unique(), &mut account); + + assert_eq!(collected.rent_amount, account_lamports); + assert_eq!( + collected.account_data_len_reclaimed, + account_data_len as u64 + ); + assert_eq!(account, AccountSharedData::default()); + } +} diff --git a/accounts-db/src/rent_debits.rs b/sdk/src/rent_debits.rs similarity index 94% rename from accounts-db/src/rent_debits.rs rename to sdk/src/rent_debits.rs index 75d8eddec10dbd..588f7c67a2a929 100644 --- 
a/accounts-db/src/rent_debits.rs +++ b/sdk/src/rent_debits.rs @@ -1,6 +1,5 @@ use { - crate::stake_rewards::RewardInfo, - solana_sdk::{pubkey::Pubkey, reward_type::RewardType}, + solana_sdk::{pubkey::Pubkey, reward_info::RewardInfo, reward_type::RewardType}, std::collections::HashMap, }; diff --git a/sdk/src/reward_info.rs b/sdk/src/reward_info.rs new file mode 100644 index 00000000000000..b3b3d4a121c3ef --- /dev/null +++ b/sdk/src/reward_info.rs @@ -0,0 +1,12 @@ +use crate::reward_type::RewardType; + +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, AbiExample, Clone, Copy)] +pub struct RewardInfo { + pub reward_type: RewardType, + /// Reward amount + pub lamports: i64, + /// Account balance in lamports after `lamports` was applied + pub post_balance: u64, + /// Vote account commission when the reward was credited, only present for voting and staking rewards + pub commission: Option, +} diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index dd09ccc69698f5..4e4ba9956f760f 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -28,8 +28,8 @@ use { }, }; -/// Maximum size of the transaction queue -const MAX_TRANSACTION_QUEUE_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day +/// Maximum size of the transaction retry pool +const MAX_TRANSACTION_RETRY_POOL_SIZE: usize = 10_000; // This seems like a lot but maybe it needs to be bigger one day /// Default retry interval const DEFAULT_RETRY_RATE_MS: u64 = 2_000; @@ -114,6 +114,8 @@ pub struct Config { pub batch_size: usize, /// How frequently batches are sent pub batch_send_rate_ms: u64, + /// When the retry pool exceeds this max size, new transactions are dropped after their first broadcast attempt + pub retry_pool_max_size: usize, } impl Default for Config { @@ -125,6 +127,7 @@ impl Default for Config { 
service_max_retries: DEFAULT_SERVICE_MAX_RETRIES, batch_size: DEFAULT_TRANSACTION_BATCH_SIZE, batch_send_rate_ms: DEFAULT_BATCH_SEND_RATE_MS, + retry_pool_max_size: MAX_TRANSACTION_RETRY_POOL_SIZE, } } } @@ -477,7 +480,7 @@ impl SendTransactionService { let retry_len = retry_transactions.len(); let entry = retry_transactions.entry(signature); if let Entry::Vacant(_) = entry { - if retry_len >= MAX_TRANSACTION_QUEUE_SIZE { + if retry_len >= config.retry_pool_max_size { datapoint_warn!("send_transaction_service-queue-overflow"); break; } else { diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index f6f2357c7702e4..225412dd08b315 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -1126,7 +1126,7 @@ impl ConnectionTable { }); let new_size = e_ref.len(); if e_ref.is_empty() { - e.remove_entry(); + e.swap_remove_entry(); } let connections_removed = old_size.saturating_sub(new_size); self.total_size = self.total_size.saturating_sub(connections_removed); diff --git a/svm/Cargo.toml b/svm/Cargo.toml new file mode 100644 index 00000000000000..4fdf7d9cb1a0b4 --- /dev/null +++ b/svm/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "solana-svm" +description = "Solana SVM" +documentation = "https://docs.rs/solana-svm" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +itertools = { workspace = true } +log = { workspace = true } +percentage = { workspace = true } +solana-accounts-db = { workspace = true } +solana-bpf-loader-program = { workspace = true } +solana-frozen-abi = { workspace = true } +solana-frozen-abi-macro = { workspace = true } +solana-loader-v4-program = { workspace = true } +solana-measure = { workspace = true } +solana-metrics = { workspace = true } +solana-program-runtime = { workspace = true } +solana-sdk = { workspace = true } 
+solana-system-program = { workspace = true } + +[lib] +crate-type = ["lib"] +name = "solana_svm" + +[dev-dependencies] +solana-accounts-db = { workspace = true, features = ["dev-context-only-utils"] } +solana-logger = { workspace = true } +solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +rustc_version = { workspace = true } + +[features] +dev-context-only-utils = [] diff --git a/svm/build.rs b/svm/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/svm/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/runtime/src/svm/account_loader.rs b/svm/src/account_loader.rs similarity index 92% rename from runtime/src/svm/account_loader.rs rename to svm/src/account_loader.rs index 31ce63654670e5..cfe0b069f156ae 100644 --- a/runtime/src/svm/account_loader.rs +++ b/svm/src/account_loader.rs @@ -1,18 +1,12 @@ use { - crate::{bank::RewardInterval, svm::account_rent_state::RentState}, - itertools::Itertools, - log::warn, - solana_accounts_db::{ - account_overrides::AccountOverrides, - accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, - accounts_db::AccountsDb, - ancestors::Ancestors, - nonce_info::NonceFull, - rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, - rent_debits::RentDebits, + crate::{ + account_overrides::AccountOverrides, account_rent_state::RentState, transaction_error_metrics::TransactionErrorMetrics, - transaction_results::TransactionCheckResult, + transaction_processor::TransactionProcessingCallback, }, + itertools::Itertools, + log::warn, + solana_accounts_db::accounts::{LoadedTransaction, TransactionLoadResult, TransactionRent}, solana_program_runtime::{ compute_budget_processor::process_compute_budget_instructions, loaded_programs::LoadedProgramsForTxBatch, @@ -22,37 +16,38 @@ use { create_executable_meta, is_builtin, is_executable, Account, 
AccountSharedData, ReadableAccount, WritableAccount, }, - feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, + feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation}, fee::FeeStructure, message::SanitizedMessage, native_loader, nonce::State as NonceState, + nonce_info::{NonceFull, NoncePartial}, pubkey::Pubkey, rent::RentDue, + rent_collector::{RentCollector, RENT_EXEMPT_RENT_EPOCH}, + rent_debits::RentDebits, saturating_add_assign, sysvar::{self, instructions::construct_instructions_data}, - transaction::{Result, SanitizedTransaction, TransactionError}, + transaction::{self, Result, SanitizedTransaction, TransactionError}, transaction_context::IndexOfAccount, }, solana_system_program::{get_system_account_kind, SystemAccountKind}, std::{collections::HashMap, num::NonZeroUsize}, }; -#[allow(clippy::too_many_arguments)] -pub(crate) fn load_accounts( - accounts_db: &AccountsDb, - ancestors: &Ancestors, +pub type TransactionCheckResult = (transaction::Result<()>, Option, Option); + +pub fn load_accounts( + callbacks: &CB, txs: &[SanitizedTransaction], lock_results: &[TransactionCheckResult], error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, fee_structure: &FeeStructure, account_overrides: Option<&AccountOverrides>, - in_reward_interval: RewardInterval, program_accounts: &HashMap, loaded_programs: &LoadedProgramsForTxBatch, ) -> Vec { + let feature_set = callbacks.get_feature_set(); txs.iter() .zip(lock_results) .map(|etx| match etx { @@ -75,15 +70,11 @@ pub(crate) fn load_accounts( // load transactions let loaded_transaction = match load_transaction_accounts( - accounts_db, - ancestors, + callbacks, tx, fee, error_counters, - rent_collector, - feature_set, account_overrides, - in_reward_interval, program_accounts, loaded_programs, ) { @@ -113,27 +104,22 @@ pub(crate) fn load_accounts( .collect() } -#[allow(clippy::too_many_arguments)] -fn 
load_transaction_accounts( - accounts_db: &AccountsDb, - ancestors: &Ancestors, +fn load_transaction_accounts( + callbacks: &CB, tx: &SanitizedTransaction, fee: u64, error_counters: &mut TransactionErrorMetrics, - rent_collector: &RentCollector, - feature_set: &FeatureSet, account_overrides: Option<&AccountOverrides>, - reward_interval: RewardInterval, program_accounts: &HashMap, loaded_programs: &LoadedProgramsForTxBatch, ) -> Result { - let in_reward_interval = reward_interval == RewardInterval::InsideInterval; - // NOTE: this check will never fail because `tx` is sanitized if tx.signatures().is_empty() && fee != 0 { return Err(TransactionError::MissingSignatureForFee); } + let feature_set = callbacks.get_feature_set(); + // There is no way to predict what program will execute without an error // If a fee can pay for execution then the program will be scheduled let mut validated_fee_payer = false; @@ -143,9 +129,7 @@ fn load_transaction_accounts( let mut accounts_found = Vec::with_capacity(account_keys.len()); let mut account_deps = Vec::with_capacity(account_keys.len()); let mut rent_debits = RentDebits::default(); - - let set_exempt_rent_epoch_max = - feature_set.is_active(&solana_sdk::feature_set::set_exempt_rent_epoch_max::id()); + let rent_collector = callbacks.get_rent_collector(); let requested_loaded_accounts_data_size_limit = get_requested_loaded_accounts_data_size_limit(tx)?; @@ -183,19 +167,15 @@ fn load_transaction_accounts( account_shared_data_from_program(key, program_accounts) .map(|program_account| (program.account_size, program_account, 0))? 
} else { - accounts_db - .load_with_fixed_root(ancestors, key) - .map(|(mut account, _)| { + callbacks + .get_account_shared_data(key) + .map(|mut account| { if message.is_writable(i) { if !feature_set .is_active(&feature_set::disable_rent_fees_collection::id()) { let rent_due = rent_collector - .collect_from_existing_account( - key, - &mut account, - set_exempt_rent_epoch_max, - ) + .collect_from_existing_account(key, &mut account) .rent_amount; (account.data().len(), account, rent_due) @@ -204,10 +184,8 @@ fn load_transaction_accounts( // are any rent paying accounts, their `rent_epoch` won't change either. However, if the // account itself is rent-exempted but its `rent_epoch` is not u64::MAX, we will set its // `rent_epoch` to u64::MAX. In such case, the behavior stays the same as before. - if set_exempt_rent_epoch_max - && (account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH - && rent_collector.get_rent_due(&account) - == RentDue::Exempt) + if account.rent_epoch() != RENT_EXEMPT_RENT_EPOCH + && rent_collector.get_rent_due(&account) == RentDue::Exempt { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); } @@ -220,12 +198,10 @@ fn load_transaction_accounts( .unwrap_or_else(|| { account_found = false; let mut default_account = AccountSharedData::default(); - if set_exempt_rent_epoch_max { - // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). - // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account - // with this field already set would allow us to skip rent collection for these accounts. - default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } + // All new accounts must be rent-exempt (enforced in Bank::execute_loaded_transaction). + // Currently, rent collection sets rent_epoch to u64::MAX, but initializing the account + // with this field already set would allow us to skip rent collection for these accounts. 
+ default_account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); (default_account.data().len(), default_account, 0) }) }; @@ -253,15 +229,7 @@ fn load_transaction_accounts( validated_fee_payer = true; } - if in_reward_interval - && message.is_writable(i) - && solana_stake_program::check_id(account.owner()) - { - error_counters.program_execution_temporarily_restricted += 1; - return Err(TransactionError::ProgramExecutionTemporarilyRestricted { - account_index: i as u8, - }); - } + callbacks.check_account_access(tx, i, &account, error_counters)?; tx_rent += rent; rent_debits.insert(key, rent, account.lamports()); @@ -306,7 +274,7 @@ fn load_transaction_accounts( return Err(TransactionError::ProgramAccountNotFound); } - if !(is_builtin(program_account) || is_executable(program_account, feature_set)) { + if !(is_builtin(program_account) || is_executable(program_account, &feature_set)) { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } @@ -324,12 +292,10 @@ fn load_transaction_accounts( builtins_start_index.saturating_add(owner_index) } else { let owner_index = accounts.len(); - if let Some((owner_account, _)) = - accounts_db.load_with_fixed_root(ancestors, owner_id) - { + if let Some(owner_account) = callbacks.get_account_shared_data(owner_id) { if !native_loader::check_id(owner_account.owner()) || !(is_builtin(&owner_account) - || is_executable(&owner_account, feature_set)) + || is_executable(&owner_account, &feature_set)) { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); @@ -484,7 +450,7 @@ mod tests { use { super::*, nonce::state::Versions as NonceVersions, - solana_accounts_db::{accounts::Accounts, rent_collector::RentCollector}, + solana_accounts_db::{accounts::Accounts, accounts_db::AccountsDb, ancestors::Ancestors}, solana_program_runtime::{ compute_budget_processor, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, @@ -494,11 
+460,13 @@ mod tests { bpf_loader_upgradeable, compute_budget::ComputeBudgetInstruction, epoch_schedule::EpochSchedule, + feature_set::FeatureSet, hash::Hash, instruction::CompiledInstruction, message::{Message, SanitizedMessage}, nonce, rent::Rent, + rent_collector::RentCollector, signature::{Keypair, Signer}, system_program, sysvar, transaction::{Result, Transaction, TransactionError}, @@ -507,6 +475,37 @@ mod tests { std::{convert::TryFrom, sync::Arc}, }; + struct TestCallbacks { + accounts: Accounts, + ancestors: Ancestors, + rent_collector: RentCollector, + feature_set: Arc, + } + + impl TransactionProcessingCallback for TestCallbacks { + fn account_matches_owners(&self, _account: &Pubkey, _owners: &[Pubkey]) -> Option { + None + } + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { + self.accounts + .load_without_fixed_root(&self.ancestors, pubkey) + .map(|(acc, _slot)| acc) + } + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { + (Hash::new_unique(), 0) + } + + fn get_rent_collector(&self) -> &RentCollector { + &self.rent_collector + } + + fn get_feature_set(&self) -> Arc { + self.feature_set.clone() + } + } + fn load_accounts_with_fee_and_rent( tx: Transaction, ka: &[TransactionAccount], @@ -525,17 +524,19 @@ mod tests { let ancestors = vec![(0, 0)].into_iter().collect(); feature_set.deactivate(&feature_set::disable_rent_fees_collection::id()); let sanitized_tx = SanitizedTransaction::from_transaction_for_tests(tx); + let callbacks = TestCallbacks { + accounts, + ancestors, + rent_collector: rent_collector.clone(), + feature_set: Arc::new(feature_set.clone()), + }; load_accounts( - &accounts.accounts_db, - &ancestors, + &callbacks, &[sanitized_tx], &[(Ok(()), None, Some(lamports_per_signature))], error_counters, - rent_collector, - feature_set, fee_structure, None, - RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), ) @@ -990,26 +991,27 @@ mod tests { } fn load_accounts_no_store( 
- accounts: &Accounts, + accounts: Accounts, tx: Transaction, account_overrides: Option<&AccountOverrides>, ) -> Vec { let tx = SanitizedTransaction::from_transaction_for_tests(tx); - let rent_collector = RentCollector::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let mut error_counters = TransactionErrorMetrics::default(); + let callbacks = TestCallbacks { + accounts, + ancestors, + rent_collector: RentCollector::default(), + feature_set: Arc::new(FeatureSet::all_enabled()), + }; load_accounts( - &accounts.accounts_db, - &ancestors, + &callbacks, &[tx], &[(Ok(()), None, Some(10))], &mut error_counters, - &rent_collector, - &FeatureSet::all_enabled(), &FeeStructure::default(), account_overrides, - RewardInterval::OutsideInterval, &HashMap::new(), &LoadedProgramsForTxBatch::default(), ) @@ -1032,7 +1034,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(&accounts, tx, None); + let loaded_accounts = load_accounts_no_store(accounts, tx, None); assert_eq!(loaded_accounts.len(), 1); assert!(loaded_accounts[0].0.is_err()); } @@ -1060,7 +1062,7 @@ mod tests { instructions, ); - let loaded_accounts = load_accounts_no_store(&accounts, tx, Some(&account_overrides)); + let loaded_accounts = load_accounts_no_store(accounts, tx, Some(&account_overrides)); assert_eq!(loaded_accounts.len(), 1); let loaded_transaction = loaded_accounts[0].0.as_ref().unwrap(); assert_eq!(loaded_transaction.accounts[0].0, keypair.pubkey()); diff --git a/accounts-db/src/account_overrides.rs b/svm/src/account_overrides.rs similarity index 100% rename from accounts-db/src/account_overrides.rs rename to svm/src/account_overrides.rs diff --git a/runtime/src/svm/account_rent_state.rs b/svm/src/account_rent_state.rs similarity index 99% rename from runtime/src/svm/account_rent_state.rs rename to svm/src/account_rent_state.rs index 3fc71ac6a27686..38cda820f8ceb7 100644 --- a/runtime/src/svm/account_rent_state.rs +++ b/svm/src/account_rent_state.rs @@ -10,7 
+10,7 @@ use { }; #[derive(Debug, PartialEq, Eq)] -pub(crate) enum RentState { +pub enum RentState { /// account.lamports == 0 Uninitialized, /// 0 < account.lamports < rent-exempt-minimum diff --git a/svm/src/lib.rs b/svm/src/lib.rs new file mode 100644 index 00000000000000..ff28128edca36d --- /dev/null +++ b/svm/src/lib.rs @@ -0,0 +1,16 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![allow(clippy::arithmetic_side_effects)] + +pub mod account_loader; +pub mod account_overrides; +pub mod account_rent_state; +pub mod runtime_config; +pub mod transaction_account_state_info; +pub mod transaction_error_metrics; +pub mod transaction_processor; + +#[macro_use] +extern crate solana_metrics; + +#[macro_use] +extern crate solana_frozen_abi_macro; diff --git a/runtime/src/runtime_config.rs b/svm/src/runtime_config.rs similarity index 100% rename from runtime/src/runtime_config.rs rename to svm/src/runtime_config.rs diff --git a/runtime/src/svm/transaction_account_state_info.rs b/svm/src/transaction_account_state_info.rs similarity index 94% rename from runtime/src/svm/transaction_account_state_info.rs rename to svm/src/transaction_account_state_info.rs index 48a6a63994e341..02d6f0228de2a7 100644 --- a/runtime/src/svm/transaction_account_state_info.rs +++ b/svm/src/transaction_account_state_info.rs @@ -1,5 +1,5 @@ use { - crate::svm::account_rent_state::RentState, + crate::account_rent_state::RentState, solana_sdk::{ account::ReadableAccount, message::SanitizedMessage, @@ -10,12 +10,12 @@ use { }, }; -pub(crate) struct TransactionAccountStateInfo { +pub struct TransactionAccountStateInfo { rent_state: Option, // None: readonly account } impl TransactionAccountStateInfo { - pub(crate) fn new( + pub fn new( rent: &Rent, transaction_context: &TransactionContext, message: &SanitizedMessage, diff --git a/accounts-db/src/transaction_error_metrics.rs b/svm/src/transaction_error_metrics.rs similarity index 100% rename from 
accounts-db/src/transaction_error_metrics.rs rename to svm/src/transaction_error_metrics.rs diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs new file mode 100644 index 00000000000000..71fc4e8e8a46b2 --- /dev/null +++ b/svm/src/transaction_processor.rs @@ -0,0 +1,963 @@ +use { + crate::{ + account_loader::{load_accounts, TransactionCheckResult}, + account_overrides::AccountOverrides, + runtime_config::RuntimeConfig, + transaction_account_state_info::TransactionAccountStateInfo, + transaction_error_metrics::TransactionErrorMetrics, + }, + log::debug, + percentage::Percentage, + solana_accounts_db::{ + accounts::{LoadedTransaction, TransactionLoadResult}, + transaction_results::{ + DurableNonceFee, TransactionExecutionDetails, TransactionExecutionResult, + }, + }, + solana_measure::measure::Measure, + solana_program_runtime::{ + compute_budget::ComputeBudget, + loaded_programs::{ + ForkGraph, LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, + LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, + }, + log_collector::LogCollector, + message_processor::MessageProcessor, + sysvar_cache::SysvarCache, + timings::{ExecuteDetailsTimings, ExecuteTimingType, ExecuteTimings}, + }, + solana_sdk::{ + account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS}, + account_utils::StateMut, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, + clock::{Epoch, Slot}, + epoch_schedule::EpochSchedule, + feature_set::FeatureSet, + fee::FeeStructure, + hash::Hash, + inner_instruction::{InnerInstruction, InnerInstructionsList}, + instruction::{CompiledInstruction, InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT}, + loader_v4::{self, LoaderV4State, LoaderV4Status}, + message::SanitizedMessage, + native_loader, + pubkey::Pubkey, + rent_collector::RentCollector, + saturating_add_assign, + transaction::{self, SanitizedTransaction, 
TransactionError}, + transaction_context::{ExecutionRecord, TransactionContext}, + }, + std::{ + cell::RefCell, + collections::{hash_map::Entry, HashMap}, + fmt::{Debug, Formatter}, + rc::Rc, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, RwLock, + }, + }, +}; + +/// A list of log messages emitted during a transaction +pub type TransactionLogMessages = Vec; + +pub struct LoadAndExecuteSanitizedTransactionsOutput { + pub loaded_transactions: Vec, + // Vector of results indicating whether a transaction was executed or could not + // be executed. Note executed transactions can still have failed! + pub execution_results: Vec, +} + +pub trait TransactionProcessingCallback { + fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option; + + fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option; + + fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64); + + fn get_rent_collector(&self) -> &RentCollector; + + fn get_feature_set(&self) -> Arc; + + fn check_account_access( + &self, + _tx: &SanitizedTransaction, + _account_index: usize, + _account: &AccountSharedData, + _error_counters: &mut TransactionErrorMetrics, + ) -> transaction::Result<()> { + Ok(()) + } +} + +enum ProgramAccountLoadResult { + AccountNotFound, + InvalidAccountData(ProgramRuntimeEnvironment), + ProgramOfLoaderV1orV2(AccountSharedData), + ProgramOfLoaderV3(AccountSharedData, AccountSharedData, Slot), + ProgramOfLoaderV4(AccountSharedData, Slot), +} + +#[derive(AbiExample)] +pub struct TransactionBatchProcessor { + /// Bank slot (i.e. 
block) + slot: Slot, + + /// Bank epoch + epoch: Epoch, + + /// initialized from genesis + epoch_schedule: EpochSchedule, + + /// Transaction fee structure + fee_structure: FeeStructure, + + pub check_program_modification_slot: bool, + + /// Optional config parameters that can override runtime behavior + runtime_config: Arc, + + pub sysvar_cache: RwLock, + + pub loaded_programs_cache: Arc>>, +} + +impl Debug for TransactionBatchProcessor { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TransactionBatchProcessor") + .field("slot", &self.slot) + .field("epoch", &self.epoch) + .field("epoch_schedule", &self.epoch_schedule) + .field("fee_structure", &self.fee_structure) + .field( + "check_program_modification_slot", + &self.check_program_modification_slot, + ) + .field("runtime_config", &self.runtime_config) + .field("sysvar_cache", &self.sysvar_cache) + .field("loaded_programs_cache", &self.loaded_programs_cache) + .finish() + } +} + +impl Default for TransactionBatchProcessor { + fn default() -> Self { + Self { + slot: Slot::default(), + epoch: Epoch::default(), + epoch_schedule: EpochSchedule::default(), + fee_structure: FeeStructure::default(), + check_program_modification_slot: false, + runtime_config: Arc::::default(), + sysvar_cache: RwLock::::default(), + loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + Slot::default(), + Epoch::default(), + ))), + } + } +} + +impl TransactionBatchProcessor { + pub fn new( + slot: Slot, + epoch: Epoch, + epoch_schedule: EpochSchedule, + fee_structure: FeeStructure, + runtime_config: Arc, + loaded_programs_cache: Arc>>, + ) -> Self { + Self { + slot, + epoch, + epoch_schedule, + fee_structure, + check_program_modification_slot: false, + runtime_config, + sysvar_cache: RwLock::::default(), + loaded_programs_cache, + } + } + + #[allow(clippy::too_many_arguments)] + pub fn load_and_execute_sanitized_transactions<'a, CB: TransactionProcessingCallback>( + &self, + callbacks: &CB, + 
sanitized_txs: &[SanitizedTransaction], + check_results: &mut [TransactionCheckResult], + error_counters: &mut TransactionErrorMetrics, + enable_cpi_recording: bool, + enable_log_recording: bool, + enable_return_data_recording: bool, + timings: &mut ExecuteTimings, + account_overrides: Option<&AccountOverrides>, + builtin_programs: impl Iterator, + log_messages_bytes_limit: Option, + ) -> LoadAndExecuteSanitizedTransactionsOutput { + let mut program_accounts_map = Self::filter_executable_program_accounts( + callbacks, + sanitized_txs, + check_results, + PROGRAM_OWNERS, + ); + let native_loader = native_loader::id(); + for builtin_program in builtin_programs { + program_accounts_map.insert(*builtin_program, (&native_loader, 0)); + } + + let programs_loaded_for_tx_batch = Rc::new(RefCell::new( + self.replenish_program_cache(callbacks, &program_accounts_map), + )); + + let mut load_time = Measure::start("accounts_load"); + let mut loaded_transactions = load_accounts( + callbacks, + sanitized_txs, + check_results, + error_counters, + &self.fee_structure, + account_overrides, + &program_accounts_map, + &programs_loaded_for_tx_batch.borrow(), + ); + load_time.stop(); + + let mut execution_time = Measure::start("execution_time"); + + let execution_results: Vec = loaded_transactions + .iter_mut() + .zip(sanitized_txs.iter()) + .map(|(accs, tx)| match accs { + (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), + (Ok(loaded_transaction), nonce) => { + let compute_budget = + if let Some(compute_budget) = self.runtime_config.compute_budget { + compute_budget + } else { + let mut compute_budget_process_transaction_time = + Measure::start("compute_budget_process_transaction_time"); + let maybe_compute_budget = ComputeBudget::try_from_instructions( + tx.message().program_instructions_iter(), + ); + compute_budget_process_transaction_time.stop(); + saturating_add_assign!( + timings + .execute_accessories + .compute_budget_process_transaction_us, + 
compute_budget_process_transaction_time.as_us() + ); + if let Err(err) = maybe_compute_budget { + return TransactionExecutionResult::NotExecuted(err); + } + maybe_compute_budget.unwrap() + }; + + let result = self.execute_loaded_transaction( + callbacks, + tx, + loaded_transaction, + compute_budget, + nonce.as_ref().map(DurableNonceFee::from), + enable_cpi_recording, + enable_log_recording, + enable_return_data_recording, + timings, + error_counters, + log_messages_bytes_limit, + &programs_loaded_for_tx_batch.borrow(), + ); + + if let TransactionExecutionResult::Executed { + details, + programs_modified_by_tx, + } = &result + { + // Update batch specific cache of the loaded programs with the modifications + // made by the transaction, if it executed successfully. + if details.status.is_ok() { + programs_loaded_for_tx_batch + .borrow_mut() + .merge(programs_modified_by_tx); + } + } + + result + } + }) + .collect(); + + execution_time.stop(); + + const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; + self.loaded_programs_cache + .write() + .unwrap() + .evict_using_2s_random_selection( + Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), + self.slot, + ); + + debug!( + "load: {}us execute: {}us txs_len={}", + load_time.as_us(), + execution_time.as_us(), + sanitized_txs.len(), + ); + + timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us()); + timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us()); + + LoadAndExecuteSanitizedTransactionsOutput { + loaded_transactions, + execution_results, + } + } + + /// Returns a hash map of executable program accounts (program accounts that are not writable + /// in the given transactions), and their owners, for the transactions with a valid + /// blockhash or nonce. 
+ pub fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( + callbacks: &CB, + txs: &[SanitizedTransaction], + lock_results: &mut [TransactionCheckResult], + program_owners: &'a [Pubkey], + ) -> HashMap { + let mut result: HashMap = HashMap::new(); + lock_results.iter_mut().zip(txs).for_each(|etx| { + if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { + if lamports_per_signature.is_some() { + tx.message() + .account_keys() + .iter() + .for_each(|key| match result.entry(*key) { + Entry::Occupied(mut entry) => { + let (_, count) = entry.get_mut(); + saturating_add_assign!(*count, 1); + } + Entry::Vacant(entry) => { + if let Some(index) = + callbacks.account_matches_owners(key, program_owners) + { + program_owners + .get(index) + .map(|owner| entry.insert((owner, 1))); + } + } + }); + } else { + // If the transaction's nonce account was not valid, and blockhash is not found, + // the transaction will fail to process. Let's not load any programs from the + // transaction, and update the status of the transaction. 
+ *etx.0 = (Err(TransactionError::BlockhashNotFound), None, None); + } + } + }); + result + } + + fn replenish_program_cache( + &self, + callback: &CB, + program_accounts_map: &HashMap, + ) -> LoadedProgramsForTxBatch { + let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = + if self.check_program_modification_slot { + program_accounts_map + .iter() + .map(|(pubkey, (_, count))| { + ( + *pubkey, + ( + self.program_modification_slot(callback, pubkey) + .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { + LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) + }), + *count, + ), + ) + }) + .collect() + } else { + program_accounts_map + .iter() + .map(|(pubkey, (_, count))| { + (*pubkey, (LoadedProgramMatchCriteria::NoCriteria, *count)) + }) + .collect() + }; + + let mut loaded_programs_for_txs = None; + let mut program_to_store = None; + loop { + let (program_to_load, task_cookie, task_waiter) = { + // Lock the global cache. + let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + // Initialize our local cache. + let is_first_round = loaded_programs_for_txs.is_none(); + if is_first_round { + loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( + self.slot, + loaded_programs_cache + .get_environments_for_epoch(self.epoch) + .clone(), + )); + } + // Submit our last completed loading task. + if let Some((key, program)) = program_to_store.take() { + loaded_programs_cache.finish_cooperative_loading_task(self.slot, key, program); + } + // Figure out which program needs to be loaded next. + let program_to_load = loaded_programs_cache.extract( + &mut missing_programs, + loaded_programs_for_txs.as_mut().unwrap(), + is_first_round, + ); + let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); + (program_to_load, task_waiter.cookie(), task_waiter) + // Unlock the global cache again. + }; + + if let Some((key, count)) = program_to_load { + // Load, verify and compile one program. 
+ let program = self.load_program(callback, &key, false, None); + program.tx_usage_counter.store(count, Ordering::Relaxed); + program_to_store = Some((key, program)); + } else if missing_programs.is_empty() { + break; + } else { + // Sleep until the next finish_cooperative_loading_task() call. + // Once a task completes we'll wake up and try to load the + // missing programs inside the tx batch again. + let _new_cookie = task_waiter.wait(task_cookie); + } + } + + loaded_programs_for_txs.unwrap() + } + + /// Execute a transaction using the provided loaded accounts and update + /// the executors cache if the transaction was successful. + #[allow(clippy::too_many_arguments)] + fn execute_loaded_transaction( + &self, + callback: &CB, + tx: &SanitizedTransaction, + loaded_transaction: &mut LoadedTransaction, + compute_budget: ComputeBudget, + durable_nonce_fee: Option, + enable_cpi_recording: bool, + enable_log_recording: bool, + enable_return_data_recording: bool, + timings: &mut ExecuteTimings, + error_counters: &mut TransactionErrorMetrics, + log_messages_bytes_limit: Option, + programs_loaded_for_tx_batch: &LoadedProgramsForTxBatch, + ) -> TransactionExecutionResult { + let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts); + + fn transaction_accounts_lamports_sum( + accounts: &[(Pubkey, AccountSharedData)], + message: &SanitizedMessage, + ) -> Option { + let mut lamports_sum = 0u128; + for i in 0..message.account_keys().len() { + let (_, account) = accounts.get(i)?; + lamports_sum = lamports_sum.checked_add(u128::from(account.lamports()))?; + } + Some(lamports_sum) + } + + let lamports_before_tx = + transaction_accounts_lamports_sum(&transaction_accounts, tx.message()).unwrap_or(0); + + let mut transaction_context = TransactionContext::new( + transaction_accounts, + callback.get_rent_collector().rent.clone(), + compute_budget.max_invoke_stack_height, + compute_budget.max_instruction_trace_length, + ); + #[cfg(debug_assertions)] + 
transaction_context.set_signature(tx.signature()); + + let pre_account_state_info = TransactionAccountStateInfo::new( + &callback.get_rent_collector().rent, + &transaction_context, + tx.message(), + ); + + let log_collector = if enable_log_recording { + match log_messages_bytes_limit { + None => Some(LogCollector::new_ref()), + Some(log_messages_bytes_limit) => Some(LogCollector::new_ref_with_limit(Some( + log_messages_bytes_limit, + ))), + } + } else { + None + }; + + let (blockhash, lamports_per_signature) = + callback.get_last_blockhash_and_lamports_per_signature(); + + let mut executed_units = 0u64; + let mut programs_modified_by_tx = LoadedProgramsForTxBatch::new( + self.slot, + programs_loaded_for_tx_batch.environments.clone(), + ); + let mut process_message_time = Measure::start("process_message_time"); + let process_result = MessageProcessor::process_message( + tx.message(), + &loaded_transaction.program_indices, + &mut transaction_context, + log_collector.clone(), + programs_loaded_for_tx_batch, + &mut programs_modified_by_tx, + callback.get_feature_set(), + compute_budget, + timings, + &self.sysvar_cache.read().unwrap(), + blockhash, + lamports_per_signature, + &mut executed_units, + ); + process_message_time.stop(); + + saturating_add_assign!( + timings.execute_accessories.process_message_us, + process_message_time.as_us() + ); + + let mut status = process_result + .and_then(|info| { + let post_account_state_info = TransactionAccountStateInfo::new( + &callback.get_rent_collector().rent, + &transaction_context, + tx.message(), + ); + TransactionAccountStateInfo::verify_changes( + &pre_account_state_info, + &post_account_state_info, + &transaction_context, + ) + .map(|_| info) + }) + .map_err(|err| { + match err { + TransactionError::InvalidRentPayingAccount + | TransactionError::InsufficientFundsForRent { .. 
} => { + error_counters.invalid_rent_paying_account += 1; + } + TransactionError::InvalidAccountIndex => { + error_counters.invalid_account_index += 1; + } + _ => { + error_counters.instruction_error += 1; + } + } + err + }); + + let log_messages: Option = + log_collector.and_then(|log_collector| { + Rc::try_unwrap(log_collector) + .map(|log_collector| log_collector.into_inner().into_messages()) + .ok() + }); + + let inner_instructions = if enable_cpi_recording { + Some(Self::inner_instructions_list_from_instruction_trace( + &transaction_context, + )) + } else { + None + }; + + let ExecutionRecord { + accounts, + return_data, + touched_account_count, + accounts_resize_delta: accounts_data_len_delta, + } = transaction_context.into(); + + if status.is_ok() + && transaction_accounts_lamports_sum(&accounts, tx.message()) + .filter(|lamports_after_tx| lamports_before_tx == *lamports_after_tx) + .is_none() + { + status = Err(TransactionError::UnbalancedTransaction); + } + let status = status.map(|_| ()); + + loaded_transaction.accounts = accounts; + saturating_add_assign!( + timings.details.total_account_count, + loaded_transaction.accounts.len() as u64 + ); + saturating_add_assign!(timings.details.changed_account_count, touched_account_count); + + let return_data = if enable_return_data_recording && !return_data.data.is_empty() { + Some(return_data) + } else { + None + }; + + TransactionExecutionResult::Executed { + details: TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + durable_nonce_fee, + return_data, + executed_units, + accounts_data_len_delta, + }, + programs_modified_by_tx: Box::new(programs_modified_by_tx), + } + } + + fn program_modification_slot( + &self, + callbacks: &CB, + pubkey: &Pubkey, + ) -> transaction::Result { + let program = callbacks + .get_account_shared_data(pubkey) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if bpf_loader_upgradeable::check_id(program.owner()) { + if let 
Ok(UpgradeableLoaderState::Program { + programdata_address, + }) = program.state() + { + let programdata = callbacks + .get_account_shared_data(&programdata_address) + .ok_or(TransactionError::ProgramAccountNotFound)?; + if let Ok(UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: _, + }) = programdata.state() + { + return Ok(slot); + } + } + Err(TransactionError::ProgramAccountNotFound) + } else if loader_v4::check_id(program.owner()) { + let state = solana_loader_v4_program::get_state(program.data()) + .map_err(|_| TransactionError::ProgramAccountNotFound)?; + Ok(state.slot) + } else { + Ok(0) + } + } + + pub fn load_program( + &self, + callbacks: &CB, + pubkey: &Pubkey, + reload: bool, + recompile: Option>, + ) -> Arc { + let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); + let effective_epoch = if recompile.is_some() { + loaded_programs_cache.latest_root_epoch.saturating_add(1) + } else { + self.epoch + }; + let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); + let mut load_program_metrics = LoadProgramMetrics { + program_id: pubkey.to_string(), + ..LoadProgramMetrics::default() + }; + + let mut loaded_program = + match self.load_program_accounts(callbacks, pubkey, environments) { + ProgramAccountLoadResult::AccountNotFound => Ok(LoadedProgram::new_tombstone( + self.slot, + LoadedProgramType::Closed, + )), + + ProgramAccountLoadResult::InvalidAccountData(env) => Err((self.slot, env)), + + ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account) => { + Self::load_program_from_bytes( + &mut load_program_metrics, + program_account.data(), + program_account.owner(), + program_account.data().len(), + 0, + environments.program_runtime_v1.clone(), + reload, + ) + .map_err(|_| (0, environments.program_runtime_v1.clone())) + } + + ProgramAccountLoadResult::ProgramOfLoaderV3( + program_account, + programdata_account, + slot, + ) => programdata_account + .data() + 
.get(UpgradeableLoaderState::size_of_programdata_metadata()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|programdata| { + Self::load_program_from_bytes( + &mut load_program_metrics, + programdata, + program_account.owner(), + program_account + .data() + .len() + .saturating_add(programdata_account.data().len()), + slot, + environments.program_runtime_v1.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v1.clone())), + + ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot) => { + program_account + .data() + .get(LoaderV4State::program_data_offset()..) + .ok_or(Box::new(InstructionError::InvalidAccountData).into()) + .and_then(|elf_bytes| { + Self::load_program_from_bytes( + &mut load_program_metrics, + elf_bytes, + &loader_v4::id(), + program_account.data().len(), + slot, + environments.program_runtime_v2.clone(), + reload, + ) + }) + .map_err(|_| (slot, environments.program_runtime_v2.clone())) + } + } + .unwrap_or_else(|(slot, env)| { + LoadedProgram::new_tombstone(slot, LoadedProgramType::FailedVerification(env)) + }); + + let mut timings = ExecuteDetailsTimings::default(); + load_program_metrics.submit_datapoint(&mut timings); + if let Some(recompile) = recompile { + loaded_program.effective_slot = loaded_program + .effective_slot + .max(self.epoch_schedule.get_first_slot_in_epoch(effective_epoch)); + loaded_program.tx_usage_counter = + AtomicU64::new(recompile.tx_usage_counter.load(Ordering::Relaxed)); + loaded_program.ix_usage_counter = + AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); + } + loaded_program.update_access_slot(self.slot); + Arc::new(loaded_program) + } + + fn load_program_from_bytes( + load_program_metrics: &mut LoadProgramMetrics, + programdata: &[u8], + loader_key: &Pubkey, + account_size: usize, + deployment_slot: Slot, + program_runtime_environment: ProgramRuntimeEnvironment, + reloading: bool, + ) -> std::result::Result> { + if reloading { + // 
Safety: this is safe because the program is being reloaded in the cache. + unsafe { + LoadedProgram::reload( + loader_key, + program_runtime_environment.clone(), + deployment_slot, + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + programdata, + account_size, + load_program_metrics, + ) + } + } else { + LoadedProgram::new( + loader_key, + program_runtime_environment.clone(), + deployment_slot, + deployment_slot.saturating_add(DELAY_VISIBILITY_SLOT_OFFSET), + programdata, + account_size, + load_program_metrics, + ) + } + } + + fn load_program_accounts( + &self, + callbacks: &CB, + pubkey: &Pubkey, + environments: &ProgramRuntimeEnvironments, + ) -> ProgramAccountLoadResult { + let program_account = match callbacks.get_account_shared_data(pubkey) { + None => return ProgramAccountLoadResult::AccountNotFound, + Some(account) => account, + }; + + debug_assert!(solana_bpf_loader_program::check_loader_id( + program_account.owner() + )); + + if loader_v4::check_id(program_account.owner()) { + return solana_loader_v4_program::get_state(program_account.data()) + .ok() + .and_then(|state| { + (!matches!(state.status, LoaderV4Status::Retracted)).then_some(state.slot) + }) + .map(|slot| ProgramAccountLoadResult::ProgramOfLoaderV4(program_account, slot)) + .unwrap_or(ProgramAccountLoadResult::InvalidAccountData( + environments.program_runtime_v2.clone(), + )); + } + + if !bpf_loader_upgradeable::check_id(program_account.owner()) { + return ProgramAccountLoadResult::ProgramOfLoaderV1orV2(program_account); + } + + if let Ok(UpgradeableLoaderState::Program { + programdata_address, + }) = program_account.state() + { + let programdata_account = match callbacks.get_account_shared_data(&programdata_address) + { + None => return ProgramAccountLoadResult::AccountNotFound, + Some(account) => account, + }; + + if let Ok(UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: _, + }) = programdata_account.state() + { + return 
ProgramAccountLoadResult::ProgramOfLoaderV3( + program_account, + programdata_account, + slot, + ); + } + } + ProgramAccountLoadResult::InvalidAccountData(environments.program_runtime_v1.clone()) + } + + /// Extract the InnerInstructionsList from a TransactionContext + fn inner_instructions_list_from_instruction_trace( + transaction_context: &TransactionContext, + ) -> InnerInstructionsList { + debug_assert!(transaction_context + .get_instruction_context_at_index_in_trace(0) + .map(|instruction_context| instruction_context.get_stack_height() + == TRANSACTION_LEVEL_STACK_HEIGHT) + .unwrap_or(true)); + let mut outer_instructions = Vec::new(); + for index_in_trace in 0..transaction_context.get_instruction_trace_length() { + if let Ok(instruction_context) = + transaction_context.get_instruction_context_at_index_in_trace(index_in_trace) + { + let stack_height = instruction_context.get_stack_height(); + if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT { + outer_instructions.push(Vec::new()); + } else if let Some(inner_instructions) = outer_instructions.last_mut() { + let stack_height = u8::try_from(stack_height).unwrap_or(u8::MAX); + let instruction = CompiledInstruction::new_from_raw_parts( + instruction_context + .get_index_of_program_account_in_transaction( + instruction_context + .get_number_of_program_accounts() + .saturating_sub(1), + ) + .unwrap_or_default() as u8, + instruction_context.get_instruction_data().to_vec(), + (0..instruction_context.get_number_of_instruction_accounts()) + .map(|instruction_account_index| { + instruction_context + .get_index_of_instruction_account_in_transaction( + instruction_account_index, + ) + .unwrap_or_default() as u8 + }) + .collect(), + ); + inner_instructions.push(InnerInstruction { + instruction, + stack_height, + }); + } else { + debug_assert!(false); + } + } else { + debug_assert!(false); + } + } + outer_instructions + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + 
solana_program_runtime::loaded_programs::BlockRelation, + solana_sdk::{sysvar::rent::Rent, transaction_context::TransactionContext}, + }; + + struct TestForkGraph {} + + impl ForkGraph for TestForkGraph { + fn relationship(&self, _a: Slot, _b: Slot) -> BlockRelation { + BlockRelation::Unknown + } + } + + #[test] + fn test_inner_instructions_list_from_instruction_trace() { + let instruction_trace = [1, 2, 1, 1, 2, 3, 2]; + let mut transaction_context = + TransactionContext::new(vec![], Rent::default(), 3, instruction_trace.len()); + for (index_in_trace, stack_height) in instruction_trace.into_iter().enumerate() { + while stack_height <= transaction_context.get_instruction_context_stack_height() { + transaction_context.pop().unwrap(); + } + if stack_height > transaction_context.get_instruction_context_stack_height() { + transaction_context + .get_next_instruction_context() + .unwrap() + .configure(&[], &[], &[index_in_trace as u8]); + transaction_context.push().unwrap(); + } + } + let inner_instructions = + TransactionBatchProcessor::::inner_instructions_list_from_instruction_trace( + &transaction_context, + ); + + assert_eq!( + inner_instructions, + vec![ + vec![InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![1], vec![]), + stack_height: 2, + }], + vec![], + vec![ + InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![4], vec![]), + stack_height: 2, + }, + InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![5], vec![]), + stack_height: 3, + }, + InnerInstruction { + instruction: CompiledInstruction::new_from_raw_parts(0, vec![6], vec![]), + stack_height: 2, + }, + ] + ] + ); + } +} diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 60f299d01e58a0..2bc8deb5fc200e 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -32,6 +32,7 @@ solana-rpc-client = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = 
true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-tpu-client = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index f041d80e6148e3..c658b53305bf74 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -34,7 +34,7 @@ use { solana_rpc_client::{nonblocking, rpc_client::RpcClient}, solana_runtime::{ bank_forks::BankForks, genesis_utils::create_genesis_config_with_leader_ex, - runtime_config::RuntimeConfig, snapshot_config::SnapshotConfig, + snapshot_config::SnapshotConfig, }, solana_sdk::{ account::{Account, AccountSharedData}, @@ -54,6 +54,7 @@ use { signature::{read_keypair_file, write_keypair_file, Keypair, Signer}, }, solana_streamer::socket::SocketAddrSpace, + solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::{ DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_TPU_ENABLE_UDP, DEFAULT_TPU_USE_QUIC, }, diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs index d799c0d9b62005..7538754539ff43 100644 --- a/turbine/src/broadcast_stage.rs +++ b/turbine/src/broadcast_stage.rs @@ -66,6 +66,8 @@ pub enum Error { Blockstore(#[from] solana_ledger::blockstore::BlockstoreError), #[error(transparent)] ClusterInfo(#[from] solana_gossip::cluster_info::ClusterInfoError), + #[error("Invalid Merkle root, slot: {slot}, index: {index}")] + InvalidMerkleRoot { slot: Slot, index: u64 }, #[error(transparent)] Io(#[from] std::io::Error), #[error(transparent)] @@ -76,8 +78,14 @@ pub enum Error { Send, #[error(transparent)] Serialize(#[from] std::boxed::Box), + #[error("Shred not found, slot: {slot}, index: {index}")] + ShredNotFound { slot: Slot, index: u64 }, #[error(transparent)] TransportError(#[from] solana_sdk::transport::TransportError), + #[error("Unknown last index, slot: {0}")] + UnknownLastIndex(Slot), + #[error("Unknown slot meta, slot: {0}")] + UnknownSlotMeta(Slot), } type Result = 
std::result::Result; diff --git a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs index 8bee47068ac499..adca69ed4938cd 100644 --- a/turbine/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/turbine/src/broadcast_stage/broadcast_duplicates_run.rs @@ -37,6 +37,7 @@ pub struct BroadcastDuplicatesConfig { pub(super) struct BroadcastDuplicatesRun { config: BroadcastDuplicatesConfig, current_slot: Slot, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, shred_version: u16, @@ -57,6 +58,7 @@ impl BroadcastDuplicatesRun { )); Self { config, + chained_merkle_root: Hash::default(), next_shred_index: u32::MAX, next_code_index: 0, shred_version, @@ -76,7 +78,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { fn run( &mut self, keypair: &Keypair, - _blockstore: &Blockstore, + blockstore: &Blockstore, receiver: &Receiver, socket_sender: &Sender<(Arc>, Option)>, blockstore_sender: &Sender<(Arc>, Option)>, @@ -87,6 +89,12 @@ impl BroadcastRun for BroadcastDuplicatesRun { let last_tick_height = receive_results.last_tick_height; if bank.slot() != self.current_slot { + self.chained_merkle_root = broadcast_utils::get_chained_merkle_root_from_parent( + bank.slot(), + bank.parent_slot(), + blockstore, + ) + .unwrap(); self.next_shred_index = 0; self.next_code_index = 0; self.current_slot = bank.slot(); @@ -169,18 +177,25 @@ impl BroadcastRun for BroadcastDuplicatesRun { ) .expect("Expected to create a new shredder"); + // Chained Merkle shreds are always discarded in epoch 0, due to + // feature_set::enable_chained_merkle_shreds. Below can be removed once + // the feature gated code is removed. 
+ let should_chain_merkle_shreds = bank.epoch() > 0; + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), - None, // chained_merkle_root + should_chain_merkle_shreds.then_some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, - false, // merkle_variant + true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); - + if let Some(shred) = data_shreds.iter().max_by_key(|shred| shred.index()) { + self.chained_merkle_root = shred.merkle_root().unwrap(); + } self.next_shred_index += data_shreds.len() as u32; if let Some(index) = coding_shreds.iter().map(Shred::index).max() { self.next_code_index = index + 1; @@ -191,10 +206,10 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &[original_last_entry], true, - None, // chained_merkle_root + should_chain_merkle_shreds.then_some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, - false, // merkle_variant + true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); @@ -205,10 +220,10 @@ impl BroadcastRun for BroadcastDuplicatesRun { keypair, &duplicate_extra_last_entries, true, - None, // chained_merkle_root + should_chain_merkle_shreds.then_some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, - false, // merkle_variant + true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); @@ -222,6 +237,8 @@ impl BroadcastRun for BroadcastDuplicatesRun { sigs, ); + assert_eq!(original_last_data_shred.len(), 1); + assert_eq!(partition_last_data_shred.len(), 1); self.next_shred_index += 1; (original_last_data_shred, partition_last_data_shred) }); diff --git a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs index 20d141dee01a73..b82ca324b61820 100644 --- 
a/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/turbine/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -1,7 +1,7 @@ use { super::*, solana_entry::entry::Entry, - solana_ledger::shred::{ProcessShredsStats, ReedSolomonCache, Shredder}, + solana_ledger::shred::{self, ProcessShredsStats, ReedSolomonCache, Shredder}, solana_sdk::{hash::Hash, signature::Keypair}, }; @@ -45,6 +45,21 @@ impl BroadcastRun for BroadcastFakeShredsRun { .expect("Database error") .map(|meta| meta.consumed) .unwrap_or(0) as u32; + let chained_merkle_root = match next_shred_index.checked_sub(1) { + None => broadcast_utils::get_chained_merkle_root_from_parent( + bank.slot(), + bank.parent_slot(), + blockstore, + ) + .unwrap(), + Some(index) => { + let shred = blockstore + .get_data_shred(bank.slot(), u64::from(index)) + .unwrap() + .unwrap(); + shred::layout::get_merkle_root(&shred).unwrap() + } + }; let num_entries = receive_results.entries.len(); @@ -60,7 +75,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height(), - None, // chained_merkle_root + Some(chained_merkle_root), next_shred_index, self.next_code_index, true, // merkle_variant @@ -82,7 +97,7 @@ impl BroadcastRun for BroadcastFakeShredsRun { keypair, &fake_entries, last_tick_height == bank.max_tick_height(), - None, // chained_merkle_root + Some(chained_merkle_root), next_shred_index, self.next_code_index, true, // merkle_variant diff --git a/turbine/src/broadcast_stage/broadcast_utils.rs b/turbine/src/broadcast_stage/broadcast_utils.rs index fe99077091f516..be231581e7fbfe 100644 --- a/turbine/src/broadcast_stage/broadcast_utils.rs +++ b/turbine/src/broadcast_stage/broadcast_utils.rs @@ -1,12 +1,15 @@ use { - super::Result, + super::{Error, Result}, bincode::serialized_size, crossbeam_channel::Receiver, solana_entry::entry::Entry, - solana_ledger::shred::ShredData, + solana_ledger::{ + blockstore::Blockstore, + shred::{self, ShredData}, + }, 
solana_poh::poh_recorder::WorkingBankEntry, solana_runtime::bank::Bank, - solana_sdk::clock::Slot, + solana_sdk::{clock::Slot, hash::Hash}, std::{ sync::Arc, time::{Duration, Instant}, @@ -25,6 +28,7 @@ pub(super) struct ReceiveResults { #[derive(Clone)] pub struct UnfinishedSlotInfo { + pub(super) chained_merkle_root: Hash, pub next_shred_index: u32, pub(crate) next_code_index: u32, pub slot: Slot, @@ -96,6 +100,34 @@ pub(super) fn recv_slot_entries(receiver: &Receiver) -> Result }) } +// Returns the Merkle root of the last erasure batch of the parent slot. +pub(super) fn get_chained_merkle_root_from_parent( + slot: Slot, + parent: Slot, + blockstore: &Blockstore, +) -> Result { + if slot == parent { + debug_assert_eq!(slot, 0u64); + return Ok(Hash::default()); + } + debug_assert!(parent < slot, "parent: {parent} >= slot: {slot}"); + let index = blockstore + .meta(parent)? + .ok_or_else(|| Error::UnknownSlotMeta(parent))? + .last_index + .ok_or_else(|| Error::UnknownLastIndex(parent))?; + let shred = blockstore + .get_data_shred(parent, index)? 
+ .ok_or(Error::ShredNotFound { + slot: parent, + index, + })?; + shred::layout::get_merkle_root(&shred).ok_or(Error::InvalidMerkleRoot { + slot: parent, + index, + }) +} + #[cfg(test)] mod tests { use { diff --git a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index b98972690c78a8..e9ed6a1a6eeed4 100644 --- a/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/turbine/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -15,6 +15,7 @@ pub(super) struct FailEntryVerificationBroadcastRun { shred_version: u16, good_shreds: Vec, current_slot: Slot, + chained_merkle_root: Hash, next_shred_index: u32, next_code_index: u32, cluster_nodes_cache: Arc>, @@ -31,6 +32,7 @@ impl FailEntryVerificationBroadcastRun { shred_version, good_shreds: vec![], current_slot: 0, + chained_merkle_root: Hash::default(), next_shred_index: 0, next_code_index: 0, cluster_nodes_cache, @@ -54,6 +56,12 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { let last_tick_height = receive_results.last_tick_height; if bank.slot() != self.current_slot { + self.chained_merkle_root = broadcast_utils::get_chained_merkle_root_from_parent( + bank.slot(), + bank.parent_slot(), + blockstore, + ) + .unwrap(); self.next_shred_index = 0; self.next_code_index = 0; self.current_slot = bank.slot(); @@ -92,7 +100,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), - None, // chained_merkle_root + Some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -100,6 +108,9 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { &mut ProcessShredsStats::default(), ); + if let Some(shred) = data_shreds.iter().max_by_key(|shred| shred.index()) { + self.chained_merkle_root = shred.merkle_root().unwrap(); + } 
self.next_shred_index += data_shreds.len() as u32; if let Some(index) = coding_shreds.iter().map(Shred::index).max() { self.next_code_index = index + 1; @@ -109,7 +120,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[good_last_entry], true, - None, // chained_merkle_root + Some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant @@ -123,13 +134,15 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { keypair, &[bad_last_entry], false, - None, // chained_merkle_root + Some(self.chained_merkle_root), self.next_shred_index, self.next_code_index, true, // merkle_variant &self.reed_solomon_cache, &mut ProcessShredsStats::default(), ); + assert_eq!(good_last_data_shred.len(), 1); + self.chained_merkle_root = good_last_data_shred.last().unwrap().merkle_root().unwrap(); self.next_shred_index += 1; (good_last_data_shred, bad_last_data_shred) }); diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs index e2b8871b4bc3c2..6378c0df40a8d3 100644 --- a/turbine/src/broadcast_stage/standard_broadcast_run.rs +++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs @@ -14,6 +14,8 @@ use { shred::{shred_code, ProcessShredsStats, ReedSolomonCache, Shred, ShredFlags, Shredder}, }, solana_sdk::{ + genesis_config::ClusterType, + hash::Hash, signature::Keypair, timing::{duration_as_us, AtomicInterval}, }, @@ -69,6 +71,7 @@ impl StandardBroadcastRun { &mut self, keypair: &Keypair, max_ticks_in_slot: u8, + cluster_type: ClusterType, stats: &mut ProcessShredsStats, ) -> Vec { const SHRED_TICK_REFERENCE_MASK: u8 = ShredFlags::SHRED_TICK_REFERENCE_MASK.bits(); @@ -85,7 +88,8 @@ impl StandardBroadcastRun { keypair, &[], // entries true, // is_last_in_slot, - None, // chained_merkle_root + should_chain_merkle_shreds(state.slot, cluster_type) + .then_some(state.chained_merkle_root), state.next_shred_index, state.next_code_index, true, // 
merkle_variant @@ -110,6 +114,7 @@ impl StandardBroadcastRun { blockstore: &Blockstore, reference_tick: u8, is_slot_end: bool, + cluster_type: ClusterType, process_stats: &mut ProcessShredsStats, max_data_shreds_per_slot: u32, max_code_shreds_per_slot: u32, @@ -121,8 +126,12 @@ impl StandardBroadcastRun { BroadcastError, > { let (slot, parent_slot) = self.current_slot_and_parent.unwrap(); - let (next_shred_index, next_code_index) = match &self.unfinished_slot { - Some(state) => (state.next_shred_index, state.next_code_index), + let (next_shred_index, next_code_index, chained_merkle_root) = match &self.unfinished_slot { + Some(state) => ( + state.next_shred_index, + state.next_code_index, + state.chained_merkle_root, + ), None => { // If the blockstore has shreds for the slot, it should not // recreate the slot: @@ -135,7 +144,17 @@ impl StandardBroadcastRun { return Ok((Vec::default(), Vec::default())); } } - (0u32, 0u32) + let chained_merkle_root = broadcast_utils::get_chained_merkle_root_from_parent( + slot, + parent_slot, + blockstore, + ) + .unwrap_or_else(|err| { + error!("Unknown chained Merkle root: {err}"); + process_stats.err_unknown_chained_merkle_root += 1; + Hash::default() + }); + (0u32, 0u32, chained_merkle_root) } }; let shredder = @@ -144,7 +163,7 @@ impl StandardBroadcastRun { keypair, entries, is_slot_end, - None, // chained_merkle_root + should_chain_merkle_shreds(slot, cluster_type).then_some(chained_merkle_root), next_shred_index, next_code_index, true, // merkle_variant @@ -153,6 +172,10 @@ impl StandardBroadcastRun { ); process_stats.num_merkle_data_shreds += data_shreds.len(); process_stats.num_merkle_coding_shreds += coding_shreds.len(); + let chained_merkle_root = match data_shreds.iter().max_by_key(|shred| shred.index()) { + None => chained_merkle_root, + Some(shred) => shred.merkle_root().unwrap(), + }; let next_shred_index = match data_shreds.iter().map(Shred::index).max() { Some(index) => index + 1, None => next_shred_index, @@ -169,6 
+192,7 @@ impl StandardBroadcastRun { return Err(BroadcastError::TooManyShreds); } self.unfinished_slot = Some(UnfinishedSlotInfo { + chained_merkle_root, next_shred_index, next_code_index, slot, @@ -232,10 +256,15 @@ impl StandardBroadcastRun { let mut process_stats = ProcessShredsStats::default(); let mut to_shreds_time = Measure::start("broadcast_to_shreds"); + let cluster_type = bank.cluster_type(); // 1) Check if slot was interrupted - let prev_slot_shreds = - self.finish_prev_slot(keypair, bank.ticks_per_slot() as u8, &mut process_stats); + let prev_slot_shreds = self.finish_prev_slot( + keypair, + bank.ticks_per_slot() as u8, + cluster_type, + &mut process_stats, + ); // 2) Convert entries to shreds and coding shreds let is_last_in_slot = last_tick_height == bank.max_tick_height(); @@ -247,6 +276,7 @@ impl StandardBroadcastRun { blockstore, reference_tick as u8, is_last_in_slot, + cluster_type, &mut process_stats, blockstore::MAX_DATA_SHREDS_PER_SLOT as u32, shred_code::MAX_CODE_SHREDS_PER_SLOT as u32, @@ -497,10 +527,15 @@ impl BroadcastRun for StandardBroadcastRun { } } +fn should_chain_merkle_shreds(_slot: Slot, _cluster_type: ClusterType) -> bool { + false +} + #[cfg(test)] mod test { use { super::*, + rand::Rng, solana_entry::entry::create_ticks, solana_gossip::cluster_info::{ClusterInfo, Node}, solana_ledger::{ @@ -510,6 +545,7 @@ mod test { solana_runtime::bank::Bank, solana_sdk::{ genesis_config::GenesisConfig, + hash::Hash, signature::{Keypair, Signer}, }, solana_streamer::socket::SocketAddrSpace, @@ -569,6 +605,7 @@ mod test { let slot = 1; let parent = 0; run.unfinished_slot = Some(UnfinishedSlotInfo { + chained_merkle_root: Hash::new_from_array(rand::thread_rng().gen()), next_shred_index, next_code_index: 17, slot, @@ -580,7 +617,12 @@ mod test { run.current_slot_and_parent = Some((4, 2)); // Slot 2 interrupted slot 1 - let shreds = run.finish_prev_slot(&keypair, 0, &mut ProcessShredsStats::default()); + let shreds = run.finish_prev_slot( + 
&keypair, + 0, // max_ticks_in_slot + ClusterType::Development, + &mut ProcessShredsStats::default(), + ); let shred = shreds .first() .expect("Expected a shred that signals an interrupt"); @@ -831,6 +873,7 @@ mod test { &blockstore, 0, false, + ClusterType::Development, &mut stats, 1000, 1000, @@ -846,6 +889,7 @@ mod test { &blockstore, 0, false, + ClusterType::Development, &mut stats, 10, 10, diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 6c7f691c27b5fa..4028221cd7ce68 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -58,6 +58,7 @@ solana-sdk = { workspace = true } solana-send-transaction-service = { workspace = true } solana-storage-bigtable = { workspace = true } solana-streamer = { workspace = true } +solana-svm = { workspace = true } solana-test-validator = { workspace = true } solana-tpu-client = { workspace = true } solana-version = { workspace = true } diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 3c851e7788e2c3..aee5fc039df410 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -19,6 +19,7 @@ use { account::AccountSharedData, clock::Slot, epoch_schedule::EpochSchedule, + feature_set, native_token::sol_to_lamports, pubkey::Pubkey, rent::Rent, @@ -348,7 +349,9 @@ fn main() { exit(1); }); - let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); + let mut features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default(); + // Remove this when client support is ready for the enable_partitioned_epoch_reward feature + features_to_deactivate.push(feature_set::enable_partitioned_epoch_reward::id()); if TestValidatorGenesis::ledger_exists(&ledger_path) { for (name, long) in &[ diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 08a7288843d803..958cdc4ec947de 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1045,6 +1045,15 @@ pub fn 
app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .default_value(&default_args.rpc_send_transaction_batch_size) .help("The size of transactions to be sent in batch."), ) + .arg( + Arg::with_name("rpc_send_transaction_retry_pool_max_size") + .long("rpc-send-transaction-retry-pool-max-size") + .value_name("NUMBER") + .takes_value(true) + .validator(is_parsable::) + .default_value(&default_args.rpc_send_transaction_retry_pool_max_size) + .help("The maximum size of transactions retry pool.") + ) .arg( Arg::with_name("rpc_scan_and_fix_roots") .long("rpc-scan-and-fix-roots") @@ -1957,6 +1966,7 @@ pub struct DefaultArgs { pub rpc_send_transaction_leader_forward_count: String, pub rpc_send_transaction_service_max_retries: String, pub rpc_send_transaction_batch_size: String, + pub rpc_send_transaction_retry_pool_max_size: String, pub rpc_threads: String, pub rpc_niceness_adjustment: String, pub rpc_bigtable_timeout: String, @@ -2042,6 +2052,9 @@ impl DefaultArgs { rpc_send_transaction_batch_size: default_send_transaction_service_config .batch_size .to_string(), + rpc_send_transaction_retry_pool_max_size: default_send_transaction_service_config + .retry_pool_max_size + .to_string(), rpc_threads: num_cpus::get().to_string(), rpc_niceness_adjustment: "0".to_string(), rpc_bigtable_timeout: "30".to_string(), diff --git a/validator/src/main.rs b/validator/src/main.rs index c0ea702da973fd..56b17e5d29c32e 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -45,7 +45,6 @@ use { solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::config::RpcLeaderScheduleConfig, solana_runtime::{ - runtime_config::RuntimeConfig, snapshot_bank_utils::DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, snapshot_config::{SnapshotConfig, SnapshotUsage}, snapshot_utils::{self, ArchiveFormat, SnapshotVersion}, @@ -59,6 +58,7 @@ use { }, solana_send_transaction_service::send_transaction_service, solana_streamer::socket::SocketAddrSpace, + 
solana_svm::runtime_config::RuntimeConfig, solana_tpu_client::tpu_client::DEFAULT_TPU_ENABLE_UDP, solana_validator::{ admin_rpc_service, @@ -1205,10 +1205,30 @@ pub fn main() { .ok() .map(|mb| mb * MB); + let account_shrink_paths: Option> = + values_t!(matches, "account_shrink_path", String) + .map(|shrink_paths| shrink_paths.into_iter().map(PathBuf::from).collect()) + .ok(); + let account_shrink_paths = account_shrink_paths.as_ref().map(|paths| { + create_and_canonicalize_directories(paths).unwrap_or_else(|err| { + eprintln!("Unable to access account shrink path: {err}"); + exit(1); + }) + }); + let (account_shrink_run_paths, account_shrink_snapshot_paths) = account_shrink_paths + .map(|paths| { + create_all_accounts_run_and_snapshot_dirs(&paths).unwrap_or_else(|err| { + eprintln!("Error: {err}"); + exit(1); + }) + }) + .unzip(); + let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_path.clone()), accounts_hash_cache_path: Some(accounts_hash_cache_path), + shrink_paths: account_shrink_run_paths, write_cache_limit_bytes: value_t!(matches, "accounts_db_cache_limit_mb", u64) .ok() .map(|mb| mb * MB as u64), @@ -1392,6 +1412,11 @@ pub fn main() { ), batch_send_rate_ms: rpc_send_batch_send_rate_ms, batch_size: rpc_send_batch_size, + retry_pool_max_size: value_t_or_exit!( + matches, + "rpc_send_transaction_retry_pool_max_size", + usize + ), }, no_poh_speed_test: matches.is_present("no_poh_speed_test"), no_os_memory_stats_reporting: matches.is_present("no_os_memory_stats_reporting"), @@ -1452,35 +1477,14 @@ pub fn main() { exit(1); }); - let account_shrink_paths: Option> = - values_t!(matches, "account_shrink_path", String) - .map(|shrink_paths| shrink_paths.into_iter().map(PathBuf::from).collect()) - .ok(); - let account_shrink_paths = account_shrink_paths.as_ref().map(|paths| { - create_and_canonicalize_directories(paths).unwrap_or_else(|err| { - eprintln!("Unable to access account shrink path: {err}"); - 
exit(1); - }) - }); - let (account_run_paths, account_snapshot_paths) = create_all_accounts_run_and_snapshot_dirs(&account_paths).unwrap_or_else(|err| { eprintln!("Error: {err}"); exit(1); }); - let (account_shrink_run_paths, account_shrink_snapshot_paths) = account_shrink_paths - .map(|paths| { - create_all_accounts_run_and_snapshot_dirs(&paths).unwrap_or_else(|err| { - eprintln!("Error: {err}"); - exit(1); - }) - }) - .unzip(); - // From now on, use run/ paths in the same way as the previous account_paths. validator_config.account_paths = account_run_paths; - validator_config.account_shrink_paths = account_shrink_run_paths; // These snapshot paths are only used for initial clean up, add in shrink paths if they exist. validator_config.account_snapshot_paths =