diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs
index 24166549bb9505..0b55b887ab528a 100644
--- a/accounts-db/src/accounts_db.rs
+++ b/accounts-db/src/accounts_db.rs
@@ -145,10 +145,6 @@ const SHRINK_COLLECT_CHUNK_SIZE: usize = 50;
 /// candidates for shrinking.
 const SHRINK_INSERT_ANCIENT_THRESHOLD: usize = 10;
 
-/// Default value for the number of ancient storages the ancient slot
-/// combining should converge to.
-pub const MAX_ANCIENT_SLOTS_DEFAULT: usize = 100_000;
-
 #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
 pub enum CreateAncientStorage {
     /// ancient storages are created by appending
@@ -504,6 +500,8 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig {
     read_cache_limit_bytes: None,
     write_cache_limit_bytes: None,
     ancient_append_vec_offset: None,
+    ancient_storage_ideal_size: None,
+    max_ancient_storages: None,
     skip_initial_hash_calc: false,
     exhaustively_verify_refcounts: false,
     create_ancient_storage: CreateAncientStorage::Pack,
@@ -526,6 +524,8 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig {
     read_cache_limit_bytes: None,
     write_cache_limit_bytes: None,
     ancient_append_vec_offset: None,
+    ancient_storage_ideal_size: None,
+    max_ancient_storages: None,
     skip_initial_hash_calc: false,
     exhaustively_verify_refcounts: false,
     create_ancient_storage: CreateAncientStorage::Pack,
@@ -617,13 +617,20 @@ pub struct AccountsAddRootTiming {
 /// | older |<- abs(offset) ->|<- slots in an epoch ->| max root
 /// | ancient | modern |
 ///
-/// Note that another constant MAX_ANCIENT_SLOTS_DEFAULT sets a
+/// Note that another constant DEFAULT_MAX_ANCIENT_STORAGES sets a
 /// threshold for combining ancient storages so that their overall
 /// number is under a certain limit, whereas this constant establishes
 /// the distance from the max root slot beyond which storages holding
 /// the account data for the slots are considered ancient by the
 /// shrinking algorithm.
 const ANCIENT_APPEND_VEC_DEFAULT_OFFSET: Option<i64> = Some(100_000);
+/// The smallest size of ideal ancient storage.
+/// The setting can be overridden on the command line
+/// with the --accounts-db-ancient-storage-ideal-size option.
+const DEFAULT_ANCIENT_STORAGE_IDEAL_SIZE: u64 = 100_000;
+/// Default value for the number of ancient storages the ancient slot
+/// combining should converge to.
+pub const DEFAULT_MAX_ANCIENT_STORAGES: usize = 100_000;
 
 #[derive(Debug, Default, Clone)]
 pub struct AccountsDbConfig {
@@ -641,6 +648,8 @@ pub struct AccountsDbConfig {
     /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET
     /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset'))
     pub ancient_append_vec_offset: Option<i64>,
+    pub ancient_storage_ideal_size: Option<u64>,
+    pub max_ancient_storages: Option<usize>,
     pub test_skip_rewrites_but_include_in_bank_hash: bool,
     pub skip_initial_hash_calc: bool,
     pub exhaustively_verify_refcounts: bool,
@@ -1459,7 +1468,8 @@ pub struct AccountsDb {
     /// Some(offset) iff we want to squash old append vecs together into 'ancient append vecs'
     /// Some(offset) means for slots up to (max_slot - (slots_per_epoch - 'offset')), put them in ancient append vecs
     pub ancient_append_vec_offset: Option<i64>,
-
+    pub ancient_storage_ideal_size: u64,
+    pub max_ancient_storages: usize,
     /// true iff we want to skip the initial hash calculation on startup
     pub skip_initial_hash_calc: bool,
 
@@ -1994,6 +2004,12 @@ impl AccountsDb {
             ancient_append_vec_offset: accounts_db_config
                 .ancient_append_vec_offset
                 .or(ANCIENT_APPEND_VEC_DEFAULT_OFFSET),
+            ancient_storage_ideal_size: accounts_db_config
+                .ancient_storage_ideal_size
+                .unwrap_or(DEFAULT_ANCIENT_STORAGE_IDEAL_SIZE),
+            max_ancient_storages: accounts_db_config
+                .max_ancient_storages
+                .unwrap_or(DEFAULT_MAX_ANCIENT_STORAGES),
             account_indexes: accounts_db_config.account_indexes.unwrap_or_default(),
             shrink_ratio: accounts_db_config.shrink_ratio,
             accounts_update_notifier,
@@ -16380,7 +16396,7 @@ pub mod tests {
         assert!(db
             .get_sorted_potential_ancient_slots(oldest_non_ancient_slot)
            .is_empty());
-        let root1 = MAX_ANCIENT_SLOTS_DEFAULT as u64 + ancient_append_vec_offset as u64 + 1;
+        let root1 = DEFAULT_MAX_ANCIENT_STORAGES as u64 + ancient_append_vec_offset as u64 + 1;
         db.add_root(root1);
         let root2 = root1 + 1;
         db.add_root(root2);
diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 23cc93fb4f0833..64d43cc21d8132 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -11,7 +11,6 @@ use {
         stats::{ShrinkAncientStats, ShrinkStatsSub},
         AccountFromStorage, AccountStorageEntry, AccountsDb, AliveAccounts,
         GetUniqueAccountsResult, ShrinkCollect, ShrinkCollectAliveSeparatedByRefs,
-        MAX_ANCIENT_SLOTS_DEFAULT,
     },
     accounts_file::AccountsFile,
     active_stats::ActiveStatItem,
@@ -31,8 +30,6 @@ use {
 /// this many # of highest slot values should be treated as desirable to pack.
 /// This gives us high slots to move packed accounts into.
 const HIGH_SLOT_OFFSET: u64 = 100;
-/// The smallest size of ideal ancient storage.
-const MINIMAL_IDEAL_STORAGE_SIZE: u64 = 5_000_000;
 
 /// ancient packing algorithm tuning per pass
 #[derive(Debug)]
@@ -344,9 +341,8 @@ impl AccountsDb {
         can_randomly_shrink: bool,
     ) {
         let tuning = PackedAncientStorageTuning {
-            // Slots old enough to be ancient. Setting this parameter
-            // to 100k makes ancient storages to be approx 5M.
-            max_ancient_slots: MAX_ANCIENT_SLOTS_DEFAULT,
+            // Slots old enough to be ancient.
+            max_ancient_slots: self.max_ancient_storages,
             // Don't re-pack anything just to shrink.
             // shrink_candidate_slots will handle these old storages.
             percent_of_alive_shrunk_data: 0,
@@ -529,7 +525,7 @@ impl AccountsDb {
         // divided by half of max ancient slots
         tuning.ideal_storage_size = NonZeroU64::new(
             (ancient_slot_infos.total_alive_bytes.0 * 2 / tuning.max_ancient_slots.max(1) as u64)
-                .max(MINIMAL_IDEAL_STORAGE_SIZE),
+                .max(self.ancient_storage_ideal_size),
         )
         .unwrap();
@@ -4007,7 +4003,7 @@ pub mod tests {
         let infos = db.collect_sort_filter_ancient_slots(slot_vec.clone(), &mut tuning);
         let ideal_storage_size = tuning.ideal_storage_size.get();
         let max_resulting_storages = tuning.max_resulting_storages.get();
-        let expected_all_infos_len = max_resulting_storages * ideal_storage_size / data_size - 1;
+        let expected_all_infos_len = max_resulting_storages * ideal_storage_size / data_size;
         assert_eq!(infos.all_infos.len(), expected_all_infos_len as usize);
     }
 
diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs
index 2f7cb7b13e0f4f..38c7303acca410 100644
--- a/ledger-tool/src/args.rs
+++ b/ledger-tool/src/args.rs
@@ -139,6 +139,20 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> {
             .validator(|s| is_within_range(s, 1..=num_cpus::get()))
             .help("Number of threads to use for background accounts hashing")
             .hidden(hidden_unless_forced()),
+        Arg::with_name("accounts_db_ancient_storage_ideal_size")
+            .long("accounts-db-ancient-storage-ideal-size")
+            .value_name("BYTES")
+            .validator(is_parsable::<u64>)
+            .takes_value(true)
+            .help("The smallest size of ideal ancient storage.")
+            .hidden(hidden_unless_forced()),
+        Arg::with_name("accounts_db_max_ancient_storages")
+            .long("accounts-db-max-ancient-storages")
+            .value_name("USIZE")
+            .validator(is_parsable::<usize>)
+            .takes_value(true)
+            .help("The number of ancient storages the ancient slot combining should converge to.")
+            .hidden(hidden_unless_forced()),
     ]
     .into_boxed_slice()
 }
@@ -349,6 +363,13 @@ pub fn get_accounts_db_config(
         accounts_hash_cache_path: Some(accounts_hash_cache_path),
         ancient_append_vec_offset: value_t!(arg_matches, "accounts_db_ancient_append_vecs", i64)
             .ok(),
+        ancient_storage_ideal_size: value_t!(
+            arg_matches,
+            "accounts_db_ancient_storage_ideal_size",
+            u64
+        )
+        .ok(),
+        max_ancient_storages: value_t!(arg_matches, "accounts_db_max_ancient_storages", usize).ok(),
         exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"),
         skip_initial_hash_calc: arg_matches.is_present("accounts_db_skip_initial_hash_calculation"),
         test_partitioned_epoch_rewards,
diff --git a/validator/src/cli.rs b/validator/src/cli.rs
index eaacee531d5e22..e413d1b1f7371f 100644
--- a/validator/src/cli.rs
+++ b/validator/src/cli.rs
@@ -1375,6 +1375,24 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 )
                 .hidden(hidden_unless_forced()),
         )
+        .arg(
+            Arg::with_name("accounts_db_ancient_storage_ideal_size")
+                .long("accounts-db-ancient-storage-ideal-size")
+                .value_name("BYTES")
+                .validator(is_parsable::<u64>)
+                .takes_value(true)
+                .help("The smallest size of ideal ancient storage.")
+                .hidden(hidden_unless_forced()),
+        )
+        .arg(
+            Arg::with_name("accounts_db_max_ancient_storages")
+                .long("accounts-db-max-ancient-storages")
+                .value_name("USIZE")
+                .validator(is_parsable::<usize>)
+                .takes_value(true)
+                .help("The number of ancient storages the ancient slot combining should converge to.")
+                .hidden(hidden_unless_forced()),
+        )
         .arg(
             Arg::with_name("accounts_db_cache_limit_mb")
                 .long("accounts-db-cache-limit-mb")
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 0f8c2af1d16ec3..0a2ec83d921590 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -1310,6 +1310,13 @@ pub fn main() {
             .ok()
             .map(|mb| mb * MB as u64),
         ancient_append_vec_offset: value_t!(matches, "accounts_db_ancient_append_vecs", i64).ok(),
+        ancient_storage_ideal_size: value_t!(
+            matches,
+            "accounts_db_ancient_storage_ideal_size",
+            u64
+        )
+        .ok(),
+        max_ancient_storages: value_t!(matches, "accounts_db_max_ancient_storages", usize).ok(),
         exhaustively_verify_refcounts: matches.is_present("accounts_db_verify_refcounts"),
         create_ancient_storage,
         test_partitioned_epoch_rewards,
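
For reference, here is a minimal standalone sketch (not part of the patch) of how the two new knobs interact in the packing pass above: the ideal ancient storage size is the total alive bytes divided by half of the target storage count, floored by `ancient_storage_ideal_size`. The function name and the example numbers below are illustrative stand-ins, not code from this change.

```rust
use std::num::NonZeroU64;

// Sketch of the sizing rule from ancient_append_vecs.rs above:
// target (total alive bytes) / (max_ancient_storages / 2) per storage,
// but never below the configured floor (DEFAULT_ANCIENT_STORAGE_IDEAL_SIZE
// unless --accounts-db-ancient-storage-ideal-size overrides it).
fn ideal_storage_size(
    total_alive_bytes: u64,
    max_ancient_storages: usize,
    ancient_storage_ideal_size: u64, // the floor
) -> NonZeroU64 {
    NonZeroU64::new(
        (total_alive_bytes * 2 / max_ancient_storages.max(1) as u64)
            .max(ancient_storage_ideal_size),
    )
    .unwrap()
}

fn main() {
    // Illustrative numbers: ~500 GB of alive account data with the default
    // target of 100_000 ancient storages yields ~10 MB per storage, which
    // is above the 100_000-byte default floor.
    let size = ideal_storage_size(500_000_000_000, 100_000, 100_000);
    println!("ideal ancient storage size: {size} bytes");
}
```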