From 744c2cbe04423664c0d3b7f5baf99c0586425548 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 3 Jan 2024 21:53:27 -0600 Subject: [PATCH] ledger-tool: Switch subcommand dispatch from if-else to match (#34644) A future change will add more cases to this if-else if-...-else chain. Using a match statement will be easier to follow than a very long if-else if-... chain. This change was broken out in order to have a higher signal-to-noise ratio in the subsequent change. --- ledger-tool/src/main.rs | 3182 ++++++++++++++++++++------------------- 1 file changed, 1635 insertions(+), 1547 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index f85dbbc6d7ec17..47b5cc0024400d 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -2018,1232 +2018,1286 @@ fn main() { let verbose_level = matches.occurrences_of("verbose"); - if let ("bigtable", Some(arg_matches)) = matches.subcommand() { - bigtable_process_command(&ledger_path, arg_matches) - } else if let ("program", Some(arg_matches)) = matches.subcommand() { - program(&ledger_path, arg_matches) - } else { - let ledger_path = canonicalize_ledger_path(&ledger_path); - - match matches.subcommand() { - ("print", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); - let num_slots = value_t!(arg_matches, "num_slots", Slot).ok(); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - let only_rooted = arg_matches.is_present("only_rooted"); - output_ledger( - open_blockstore(&ledger_path, arg_matches, AccessType::Secondary), - starting_slot, - ending_slot, - allow_dead_slots, - OutputFormat::Display, - num_slots, - verbose_level, - only_rooted, - ); - } - ("copy", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); - - let source = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - - // Check if shred storage type can be inferred; if not, a new - // ledger is being created. open_blockstore() will attempt to - // infer shred storage type as well, but this check provides - // extra insight to the user on how to create a FIFO ledger.
- let _ = get_shred_storage_type( - &target_db, - &format!( - "No --target-db ledger at {:?} was detected, default compaction \ + match matches.subcommand() { + ("bigtable", Some(arg_matches)) => bigtable_process_command(&ledger_path, arg_matches), + ("program", Some(arg_matches)) => program(&ledger_path, arg_matches), + _ => { + let ledger_path = canonicalize_ledger_path(&ledger_path); + + match matches.subcommand() { + ("print", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = + value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); + let num_slots = value_t!(arg_matches, "num_slots", Slot).ok(); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let only_rooted = arg_matches.is_present("only_rooted"); + output_ledger( + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary), + starting_slot, + ending_slot, + allow_dead_slots, + OutputFormat::Display, + num_slots, + verbose_level, + only_rooted, + ); + } + ("copy", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + let target_db = + PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String)); + + let source = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + + // Check if shred storage type can be inferred; if not, a new + // ledger is being created. open_blockstore() will attempt to + // infer shred storage type as well, but this check provides + // extra insight to the user on how to create a FIFO ledger. + let _ = get_shred_storage_type( + &target_db, + &format!( + "No --target-db ledger at {:?} was detected, default compaction \ (RocksLevel) will be used. 
Fifo compaction can be enabled for a new \ ledger by manually creating {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory \ within the specified --target_db directory.", - &target_db - ), - ); - let target = open_blockstore(&target_db, arg_matches, AccessType::Primary); - for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { - if slot > ending_slot { - break; - } - if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { - if target.insert_shreds(shreds, None, true).is_err() { - warn!("error inserting shreds for slot {}", slot); + &target_db + ), + ); + let target = open_blockstore(&target_db, arg_matches, AccessType::Primary); + for (slot, _meta) in source.slot_meta_iterator(starting_slot).unwrap() { + if slot > ending_slot { + break; + } + if let Ok(shreds) = source.get_data_shreds_for_slot(slot, 0) { + if target.insert_shreds(shreds, None, true).is_err() { + warn!("error inserting shreds for slot {}", slot); + } } } } - } - ("genesis", Some(arg_matches)) => { - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let print_accounts = arg_matches.is_present("accounts"); - if print_accounts { - let print_account_data = !arg_matches.is_present("no_account_data"); - let print_encoding_format = parse_encoding_format(arg_matches); - for (pubkey, account) in genesis_config.accounts { - output_account( - &pubkey, - &AccountSharedData::from(account), - None, - print_account_data, - print_encoding_format, - ); + ("genesis", Some(arg_matches)) => { + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let print_accounts = arg_matches.is_present("accounts"); + if print_accounts { + let print_account_data = !arg_matches.is_present("no_account_data"); + let print_encoding_format = parse_encoding_format(arg_matches); + for (pubkey, account) in genesis_config.accounts { + output_account( + &pubkey, + &AccountSharedData::from(account), + None, + print_account_data, + print_encoding_format, + ); + } + } else { + println!("{genesis_config}"); } - } else { - println!("{genesis_config}"); } - } - ("genesis-hash", Some(arg_matches)) => { - println!( - "{}", - open_genesis_config_by(&ledger_path, arg_matches).hash() - ); - } - ("modify-genesis", Some(arg_matches)) => { - let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let output_directory = - PathBuf::from(arg_matches.value_of("output_directory").unwrap()); - - if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { - genesis_config.cluster_type = cluster_type; + ("genesis-hash", Some(arg_matches)) => { + println!( + "{}", + open_genesis_config_by(&ledger_path, arg_matches).hash() + ); } + ("modify-genesis", Some(arg_matches)) => { + let mut genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let output_directory = + PathBuf::from(arg_matches.value_of("output_directory").unwrap()); - if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") { - genesis_config.poh_config.hashes_per_tick = match hashes_per_tick { - // Note: Unlike `solana-genesis`, "auto" is not supported here. 
- "sleep" => None, - _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + if let Some(cluster_type) = cluster_type_of(arg_matches, "cluster_type") { + genesis_config.cluster_type = cluster_type; } - } - create_new_ledger( - &output_directory, - &genesis_config, - solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, - LedgerColumnOptions::default(), - ) - .unwrap_or_else(|err| { - eprintln!("Failed to write genesis config: {err:?}"); - exit(1); - }); - - println!("{}", open_genesis_config_by(&output_directory, arg_matches)); - } - ("shred-version", Some(arg_matches)) => { - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot: Some(0), - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = - open_blockstore(&ledger_path, arg_matches, get_access_type(&process_options)); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); + if let Some(hashes_per_tick) = arg_matches.value_of("hashes_per_tick") { + genesis_config.poh_config.hashes_per_tick = match hashes_per_tick { + // Note: Unlike `solana-genesis`, "auto" is not supported here. + "sleep" => None, + _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + } + } - println!( - "{}", - compute_shred_version( - &genesis_config.hash(), - Some(&bank_forks.read().unwrap().working_bank().hard_forks()) + create_new_ledger( + &output_directory, + &genesis_config, + solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE, + LedgerColumnOptions::default(), ) - ); - } - ("shred-meta", Some(arg_matches)) => { - #[derive(Debug)] - #[allow(dead_code)] - struct ShredMeta<'a> { - slot: Slot, - full_slot: bool, - shred_index: usize, - data: bool, - code: bool, - last_in_slot: bool, - data_complete: bool, - shred: &'a Shred, + .unwrap_or_else(|err| { + eprintln!("Failed to write genesis config: {err:?}"); + exit(1); + }); + + println!("{}", open_genesis_config_by(&output_directory, arg_matches)); } - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); - let ledger = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - for (slot, _meta) in ledger - .slot_meta_iterator(starting_slot) - .unwrap() - .take_while(|(slot, _)| *slot <= ending_slot) - { - let full_slot = ledger.is_full(slot); - if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) { - for (shred_index, shred) in shreds.iter().enumerate() { - println!( - "{:#?}", - ShredMeta { - slot, - full_slot, - shred_index, - data: shred.is_data(), - code: shred.is_code(), - data_complete: shred.data_complete(), - last_in_slot: shred.last_in_slot(), - shred, - } - ); + ("shred-version", Some(arg_matches)) => { + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot: Some(0), + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); 
+ let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + + println!( + "{}", + compute_shred_version( + &genesis_config.hash(), + Some(&bank_forks.read().unwrap().working_bank().hard_forks()) + ) + ); + } + ("shred-meta", Some(arg_matches)) => { + #[derive(Debug)] + #[allow(dead_code)] + struct ShredMeta<'a> { + slot: Slot, + full_slot: bool, + shred_index: usize, + data: bool, + code: bool, + last_in_slot: bool, + data_complete: bool, + shred: &'a Shred, + } + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = + value_t!(arg_matches, "ending_slot", Slot).unwrap_or(Slot::MAX); + let ledger = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + for (slot, _meta) in ledger + .slot_meta_iterator(starting_slot) + .unwrap() + .take_while(|(slot, _)| *slot <= ending_slot) + { + let full_slot = ledger.is_full(slot); + if let Ok(shreds) = ledger.get_data_shreds_for_slot(slot, 0) { + for (shred_index, shred) in shreds.iter().enumerate() { + println!( + "{:#?}", + ShredMeta { + slot, + full_slot, + shred_index, + data: shred.is_data(), + code: shred.is_code(), + data_complete: shred.data_complete(), + last_in_slot: shred.last_in_slot(), + shred, + } + ); + } } } } - } - ("bank-hash", Some(arg_matches)) => { - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = - open_blockstore(&ledger_path, arg_matches, get_access_type(&process_options)); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); - println!("{}", &bank_forks.read().unwrap().working_bank().hash()); - } - ("slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - for slot in slots { - println!("Slot {slot}"); - if let Err(err) = output_slot( - &blockstore, - slot, - allow_dead_slots, - &OutputFormat::Display, - verbose_level, - &mut HashMap::new(), - ) { - eprintln!("{err}"); + ("bank-hash", Some(arg_matches)) => { + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + ..ProcessOptions::default() + }; + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + println!("{}", &bank_forks.read().unwrap().working_bank().hash()); + } + ("slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let 
allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + for slot in slots { + println!("Slot {slot}"); + if let Err(err) = output_slot( + &blockstore, + slot, + allow_dead_slots, + &OutputFormat::Display, + verbose_level, + &mut HashMap::new(), + ) { + eprintln!("{err}"); + } } } - ("json", Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); - output_ledger( - open_blockstore(&ledger_path, arg_matches, AccessType::Secondary), - starting_slot, - Slot::MAX, - allow_dead_slots, - OutputFormat::Json, - None, - std::u64::MAX, - true, - ); - } - ("dead-slots", Some(arg_matches)) => { - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { - println!("{slot}"); + ("json", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let allow_dead_slots = arg_matches.is_present("allow_dead_slots"); + output_ledger( + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary), + starting_slot, + Slot::MAX, + allow_dead_slots, + OutputFormat::Json, + None, + std::u64::MAX, + true, + ); } - } - ("duplicate-slots", Some(arg_matches)) => { - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { - println!("{slot}"); + ("dead-slots", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.dead_slots_iterator(starting_slot).unwrap() { + println!("{slot}"); + } } - } - ("set-dead-slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Primary); - for slot in slots { - match blockstore.set_dead_slot(slot) { - Ok(_) => println!("Slot {slot} dead"), - Err(err) => eprintln!("Failed to set slot {slot} dead: {err:?}"), + ("duplicate-slots", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + for slot in blockstore.duplicate_slots_iterator(starting_slot).unwrap() { + println!("{slot}"); } } - } - ("remove-dead-slot", Some(arg_matches)) => { - let slots = values_t_or_exit!(arg_matches, "slots", Slot); - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Primary); - for slot in slots { - match blockstore.remove_dead_slot(slot) { - Ok(_) => println!("Slot {slot} no longer marked dead"), - Err(err) => { - eprintln!("Failed to remove dead flag for slot {slot}: {err:?}") + ("set-dead-slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Primary); + for slot in slots { + match blockstore.set_dead_slot(slot) { + Ok(_) => println!("Slot {slot} dead"), + Err(err) => eprintln!("Failed to set slot {slot} dead: {err:?}"), } } } - } - ("parse_full_frozen", 
Some(arg_matches)) => { - let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - let mut ancestors = BTreeSet::new(); - assert!( - blockstore.meta(ending_slot).unwrap().is_some(), - "Ending slot doesn't exist" - ); - for a in AncestorIterator::new(ending_slot, &blockstore) { - ancestors.insert(a); - if a <= starting_slot { - break; + ("remove-dead-slot", Some(arg_matches)) => { + let slots = values_t_or_exit!(arg_matches, "slots", Slot); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Primary); + for slot in slots { + match blockstore.remove_dead_slot(slot) { + Ok(_) => println!("Slot {slot} no longer marked dead"), + Err(err) => { + eprintln!("Failed to remove dead flag for slot {slot}: {err:?}") + } + } + } } - } - println!("ancestors: {:?}", ancestors.iter()); - - let mut frozen = BTreeMap::new(); - let mut full = BTreeMap::new(); - let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap(); - let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); - - let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); - let f = BufReader::new(File::open(log_file).unwrap()); - println!("Reading log file"); - for line in f.lines().map_while(Result::ok) { - let parse_results = { - if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { - Some((slot_string, &mut frozen)) - } else { - full_regex - .captures_iter(&line) - .next() - .map(|slot_string| (slot_string, &mut full)) + ("parse_full_frozen", Some(arg_matches)) => { + let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let mut ancestors = BTreeSet::new(); + assert!( + blockstore.meta(ending_slot).unwrap().is_some(), + "Ending slot doesn't exist" + ); + for a in AncestorIterator::new(ending_slot, &blockstore) { + ancestors.insert(a); + if a <= starting_slot { + break; } - }; + } + println!("ancestors: {:?}", ancestors.iter()); + + let mut frozen = BTreeMap::new(); + let mut full = BTreeMap::new(); + let frozen_regex = Regex::new(r"bank frozen: (\d*)").unwrap(); + let full_regex = Regex::new(r"slot (\d*) is full").unwrap(); + + let log_file = PathBuf::from(value_t_or_exit!(arg_matches, "log_path", String)); + let f = BufReader::new(File::open(log_file).unwrap()); + println!("Reading log file"); + for line in f.lines().map_while(Result::ok) { + let parse_results = { + if let Some(slot_string) = frozen_regex.captures_iter(&line).next() { + Some((slot_string, &mut frozen)) + } else { + full_regex + .captures_iter(&line) + .next() + .map(|slot_string| (slot_string, &mut full)) + } + }; - if let Some((slot_string, map)) = parse_results { - let slot = slot_string - .get(1) - .expect("Only one match group") - .as_str() - .parse::<Slot>() - .unwrap(); - if ancestors.contains(&slot) && !map.contains_key(&slot) { - map.insert(slot, line); - } - if slot == ending_slot - && frozen.contains_key(&slot) - && full.contains_key(&slot) - { - break; + if let Some((slot_string, map)) = parse_results { + let slot = slot_string + .get(1) + .expect("Only one match group") + .as_str() + .parse::<Slot>() + .unwrap(); + if ancestors.contains(&slot) && !map.contains_key(&slot) { + map.insert(slot, line); + } + if slot == ending_slot + && 
frozen.contains_key(&slot) + && full.contains_key(&slot) + { + break; + } + } } - for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) { - assert_eq!(slot1, slot2); - println!("Slot: {slot1}\n, full: {full_log}\n, frozen: {frozen_log}"); + for ((slot1, frozen_log), (slot2, full_log)) in frozen.iter().zip(full.iter()) { + assert_eq!(slot1, slot2); + println!("Slot: {slot1}\n, full: {full_log}\n, frozen: {frozen_log}"); + } } - } - ("verify", Some(arg_matches)) => { - let exit_signal = Arc::new(AtomicBool::new(false)); - let report_os_memory_stats = arg_matches.is_present("os_memory_stats_reporting"); - let system_monitor_service = SystemMonitorService::new( - Arc::clone(&exit_signal), - SystemMonitorStatsReportConfig { - report_os_memory_stats, - report_os_network_stats: false, - report_os_cpu_stats: false, - report_os_disk_stats: false, - }, - ); + ("verify", Some(arg_matches)) => { + let exit_signal = Arc::new(AtomicBool::new(false)); + let report_os_memory_stats = + arg_matches.is_present("os_memory_stats_reporting"); + let system_monitor_service = SystemMonitorService::new( + Arc::clone(&exit_signal), + SystemMonitorStatsReportConfig { + report_os_memory_stats, + report_os_network_stats: false, + report_os_cpu_stats: false, + report_os_disk_stats: false, + }, + ); - let debug_keys = pubkeys_of(arg_matches, "debug_key") - .map(|pubkeys| Arc::new(pubkeys.into_iter().collect::<HashSet<_>>())); + let debug_keys = pubkeys_of(arg_matches, "debug_key") + .map(|pubkeys| Arc::new(pubkeys.into_iter().collect::<HashSet<_>>())); - if arg_matches.is_present("skip_poh_verify") { - eprintln!( - "--skip-poh-verify is deprecated. Replace with --skip-verification." - ); - } + if arg_matches.is_present("skip_poh_verify") { + eprintln!( + "--skip-poh-verify is deprecated. Replace with --skip-verification."
+ ); + } - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - run_verification: !(arg_matches.is_present("skip_poh_verify") - || arg_matches.is_present("skip_verification")), - on_halt_store_hash_raw_data_for_debug: arg_matches - .is_present("halt_at_slot_store_hash_raw_data"), - run_final_accounts_hash_calc: arg_matches.is_present("run_final_hash_calc"), - halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - debug_keys, - limit_load_slot_count_from_snapshot: value_t!( + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + run_verification: !(arg_matches.is_present("skip_poh_verify") + || arg_matches.is_present("skip_verification")), + on_halt_store_hash_raw_data_for_debug: arg_matches + .is_present("halt_at_slot_store_hash_raw_data"), + run_final_accounts_hash_calc: arg_matches.is_present("run_final_hash_calc"), + halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + debug_keys, + limit_load_slot_count_from_snapshot: value_t!( + arg_matches, + "limit_load_slot_count_from_snapshot", + usize + ) + .ok(), + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + verify_index: arg_matches.is_present("verify_accounts_index"), + allow_dead_slots: arg_matches.is_present("allow_dead_slots"), + accounts_db_test_hash_calculation: arg_matches + .is_present("accounts_db_test_hash_calculation"), + accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), + runtime_config: RuntimeConfig::default(), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; + let print_accounts_stats = arg_matches.is_present("print_accounts_stats"); + let write_bank_file = arg_matches.is_present("write_bank_file"); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + info!("genesis hash: {}", genesis_config.hash()); + + let blockstore = open_blockstore( + &ledger_path, arg_matches, - "limit_load_slot_count_from_snapshot", - usize - ) - .ok(), - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - verify_index: arg_matches.is_present("verify_accounts_index"), - allow_dead_slots: arg_matches.is_present("allow_dead_slots"), - accounts_db_test_hash_calculation: arg_matches - .is_present("accounts_db_test_hash_calculation"), - accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), - runtime_config: RuntimeConfig::default(), - use_snapshot_archives_at_startup: value_t_or_exit!( + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let print_accounts_stats = arg_matches.is_present("print_accounts_stats"); - let write_bank_file = arg_matches.is_present("write_bank_file"); - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - info!("genesis hash: {}", genesis_config.hash()); - - let blockstore = - open_blockstore(&ledger_path, arg_matches, get_access_type(&process_options)); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + 
incremental_snapshot_archive_path, + ); - if print_accounts_stats { - let working_bank = bank_forks.read().unwrap().working_bank(); - working_bank.print_accounts_stats(); - } - if write_bank_file { - let working_bank = bank_forks.read().unwrap().working_bank(); - bank_hash_details::write_bank_hash_details_file(&working_bank) - .map_err(|err| { - warn!("Unable to write bank hash_details file: {err}"); - }) - .ok(); + if print_accounts_stats { + let working_bank = bank_forks.read().unwrap().working_bank(); + working_bank.print_accounts_stats(); + } + if write_bank_file { + let working_bank = bank_forks.read().unwrap().working_bank(); + bank_hash_details::write_bank_hash_details_file(&working_bank) + .map_err(|err| { + warn!("Unable to write bank hash_details file: {err}"); + }) + .ok(); + } + exit_signal.store(true, Ordering::Relaxed); + system_monitor_service.join().unwrap(); } - exit_signal.store(true, Ordering::Relaxed); - system_monitor_service.join().unwrap(); - } - ("graph", Some(arg_matches)) => { - let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); - let graph_config = GraphConfig { - include_all_votes: arg_matches.is_present("include_all_votes"), - vote_account_mode: value_t_or_exit!( - arg_matches, - "vote_account_mode", - GraphVoteAccountMode - ), - }; + ("graph", Some(arg_matches)) => { + let output_file = value_t_or_exit!(arg_matches, "graph_filename", String); + let graph_config = GraphConfig { + include_all_votes: arg_matches.is_present("include_all_votes"), + vote_account_mode: value_t_or_exit!( + arg_matches, + "vote_account_mode", + GraphVoteAccountMode + ), + }; - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - use_snapshot_archives_at_startup: value_t_or_exit!( - arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot: value_t!(arg_matches, "halt_at_slot", Slot).ok(), + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = - open_blockstore(&ledger_path, arg_matches, get_access_type(&process_options)); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); - let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config); - let extension = Path::new(&output_file).extension(); - let result = if extension == Some(OsStr::new("pdf")) { - render_dot(dot, &output_file, "pdf") - } else if extension == 
Some(OsStr::new("png")) { - render_dot(dot, &output_file, "png") - } else { - File::create(&output_file) - .and_then(|mut file| file.write_all(&dot.into_bytes())) - }; + let dot = graph_forks(&bank_forks.read().unwrap(), &graph_config); + let extension = Path::new(&output_file).extension(); + let result = if extension == Some(OsStr::new("pdf")) { + render_dot(dot, &output_file, "pdf") + } else if extension == Some(OsStr::new("png")) { + render_dot(dot, &output_file, "png") + } else { + File::create(&output_file) + .and_then(|mut file| file.write_all(&dot.into_bytes())) + }; - match result { - Ok(_) => println!("Wrote {output_file}"), - Err(err) => eprintln!("Unable to write {output_file}: {err}"), + match result { + Ok(_) => println!("Wrote {output_file}"), + Err(err) => eprintln!("Unable to write {output_file}: {err}"), + } } - } - ("create-snapshot", Some(arg_matches)) => { - let is_incremental = arg_matches.is_present("incremental"); - let is_minimized = arg_matches.is_present("minimized"); - let output_directory = value_t!(arg_matches, "output_directory", PathBuf) - .unwrap_or_else(|_| { - match ( - is_incremental, - &snapshot_archive_path, - &incremental_snapshot_archive_path, - ) { - (true, _, Some(incremental_snapshot_archive_path)) => { - incremental_snapshot_archive_path.clone() + ("create-snapshot", Some(arg_matches)) => { + let is_incremental = arg_matches.is_present("incremental"); + let is_minimized = arg_matches.is_present("minimized"); + let output_directory = value_t!(arg_matches, "output_directory", PathBuf) + .unwrap_or_else(|_| { + match ( + is_incremental, + &snapshot_archive_path, + &incremental_snapshot_archive_path, + ) { + (true, _, Some(incremental_snapshot_archive_path)) => { + incremental_snapshot_archive_path.clone() + } + (_, Some(snapshot_archive_path), _) => { + snapshot_archive_path.clone() + } + (_, _, _) => ledger_path.clone(), } - (_, Some(snapshot_archive_path), _) => snapshot_archive_path.clone(), - (_, _, _) => ledger_path.clone(), - } - }); - let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok(); - let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); - let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); - - let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); - let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); - - let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); - let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); - - let bootstrap_stake_authorized_pubkey = - pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); - let bootstrap_validator_lamports = - value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); - let bootstrap_validator_stake_lamports = - value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64); - let minimum_stake_lamports = rent.minimum_balance(StakeStateV2::size_of()); - if bootstrap_validator_stake_lamports < minimum_stake_lamports { - eprintln!( + }); + let mut warp_slot = value_t!(arg_matches, "warp_slot", Slot).ok(); + let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); + let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); + + let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); + let faucet_lamports = + value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); + + let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); + let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); + + let 
bootstrap_stake_authorized_pubkey = + pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); + let bootstrap_validator_lamports = + value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); + let bootstrap_validator_stake_lamports = + value_t_or_exit!(arg_matches, "bootstrap_validator_stake_lamports", u64); + let minimum_stake_lamports = rent.minimum_balance(StakeStateV2::size_of()); + if bootstrap_validator_stake_lamports < minimum_stake_lamports { + eprintln!( "Error: insufficient --bootstrap-validator-stake-lamports. Minimum amount \ is {minimum_stake_lamports}" ); - exit(1); - } - let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator"); - let accounts_to_remove = - pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); - let feature_gates_to_deactivate = - pubkeys_of(arg_matches, "feature_gates_to_deactivate").unwrap_or_default(); - let vote_accounts_to_destake: HashSet<_> = - pubkeys_of(arg_matches, "vote_accounts_to_destake") - .unwrap_or_default() - .into_iter() - .collect(); - let snapshot_version = arg_matches.value_of("snapshot_version").map_or( - SnapshotVersion::default(), - |s| { - s.parse::<SnapshotVersion>().unwrap_or_else(|e| { - eprintln!("Error: {e}"); - exit(1) - }) - }, - ); + exit(1); + } + let bootstrap_validator_pubkeys = + pubkeys_of(arg_matches, "bootstrap_validator"); + let accounts_to_remove = + pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); + let feature_gates_to_deactivate = + pubkeys_of(arg_matches, "feature_gates_to_deactivate").unwrap_or_default(); + let vote_accounts_to_destake: HashSet<_> = + pubkeys_of(arg_matches, "vote_accounts_to_destake") + .unwrap_or_default() + .into_iter() + .collect(); + let snapshot_version = arg_matches.value_of("snapshot_version").map_or( + SnapshotVersion::default(), + |s| { + s.parse::<SnapshotVersion>().unwrap_or_else(|e| { + eprintln!("Error: {e}"); + exit(1) + }) + }, + ); - let snapshot_archive_format = { - let archive_format_str = - value_t_or_exit!(arg_matches, "snapshot_archive_format", String); - ArchiveFormat::from_cli_arg(&archive_format_str).unwrap_or_else(|| { - panic!("Archive format not recognized: {archive_format_str}") - }) - }; + let snapshot_archive_format = { + let archive_format_str = + value_t_or_exit!(arg_matches, "snapshot_archive_format", String); + ArchiveFormat::from_cli_arg(&archive_format_str).unwrap_or_else(|| { + panic!("Archive format not recognized: {archive_format_str}") + }) + }; - let maximum_full_snapshot_archives_to_retain = value_t_or_exit!( - arg_matches, - "maximum_full_snapshots_to_retain", - NonZeroUsize - ); - let maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( - arg_matches, - "maximum_incremental_snapshots_to_retain", - NonZeroUsize - ); - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let mut process_options = ProcessOptions { - new_hard_forks, - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), - use_snapshot_archives_at_startup: value_t_or_exit!( + let maximum_full_snapshot_archives_to_retain = value_t_or_exit!( arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let blockstore = Arc::new(open_blockstore( - &ledger_path, - arg_matches, - get_access_type(&process_options), - )); + "maximum_full_snapshots_to_retain", + NonZeroUsize + ); + let 
maximum_incremental_snapshot_archives_to_retain = value_t_or_exit!( + arg_matches, + "maximum_incremental_snapshots_to_retain", + NonZeroUsize + ); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let mut process_options = ProcessOptions { + new_hard_forks, + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + accounts_db_skip_shrink: arg_matches.is_present("accounts_db_skip_shrink"), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() + }; + let blockstore = Arc::new(open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + )); - let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { - blockstore - .rooted_slot_iterator(0) - .expect("Failed to get rooted slot iterator") - .last() - .expect("Failed to get root") - } else { - value_t_or_exit!(arg_matches, "snapshot_slot", Slot) - }; + let snapshot_slot = if Some("ROOT") == arg_matches.value_of("snapshot_slot") { + blockstore + .rooted_slot_iterator(0) + .expect("Failed to get rooted slot iterator") + .last() + .expect("Failed to get root") + } else { + value_t_or_exit!(arg_matches, "snapshot_slot", Slot) + }; - if blockstore - .meta(snapshot_slot) - .unwrap() - .filter(|m| m.is_full()) - .is_none() - { - eprintln!( + if blockstore + .meta(snapshot_slot) + .unwrap() + .filter(|m| m.is_full()) + .is_none() + { + eprintln!( "Error: snapshot slot {snapshot_slot} does not exist in blockstore or is \ not full.", ); - exit(1); - } - process_options.halt_at_slot = Some(snapshot_slot); - - let ending_slot = if is_minimized { - let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); - if ending_slot <= snapshot_slot { - eprintln!( - "Error: ending_slot ({ending_slot}) must be greater than \ - snapshot_slot ({snapshot_slot})" - ); exit(1); } + process_options.halt_at_slot = Some(snapshot_slot); - Some(ending_slot) - } else { - None - }; + let ending_slot = if is_minimized { + let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); + if ending_slot <= snapshot_slot { + eprintln!( + "Error: ending_slot ({ending_slot}) must be greater than \ + snapshot_slot ({snapshot_slot})" + ); + exit(1); + } - let snapshot_type_str = if is_incremental { - "incremental " - } else if is_minimized { - "minimized " - } else { - "" - }; + Some(ending_slot) + } else { + None + }; - info!( - "Creating {}snapshot of slot {} in {}", - snapshot_type_str, - snapshot_slot, - output_directory.display() - ); + let snapshot_type_str = if is_incremental { + "incremental " + } else if is_minimized { + "minimized " + } else { + "" + }; - let (bank_forks, starting_snapshot_hashes) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - blockstore.clone(), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); - let mut bank = bank_forks - .read() - .unwrap() - .get(snapshot_slot) - .unwrap_or_else(|| { - eprintln!("Error: Slot {snapshot_slot} is not available"); - exit(1); - }); + info!( + "Creating {}snapshot of slot {} in {}", + snapshot_type_str, + snapshot_slot, + output_directory.display() + ); - let child_bank_required = rent_burn_percentage.is_ok() - || hashes_per_tick.is_some() - || remove_stake_accounts - || !accounts_to_remove.is_empty() - || !feature_gates_to_deactivate.is_empty() - || !vote_accounts_to_destake.is_empty() - || 
faucet_pubkey.is_some() - || bootstrap_validator_pubkeys.is_some(); - - if child_bank_required { - let mut child_bank = - Bank::new_from_parent(bank.clone(), bank.collector_id(), bank.slot() + 1); - - if let Ok(rent_burn_percentage) = rent_burn_percentage { - child_bank.set_rent_burn_percentage(rent_burn_percentage); + let (bank_forks, starting_snapshot_hashes) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + blockstore.clone(), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + let mut bank = bank_forks + .read() + .unwrap() + .get(snapshot_slot) + .unwrap_or_else(|| { + eprintln!("Error: Slot {snapshot_slot} is not available"); + exit(1); + }); + + let child_bank_required = rent_burn_percentage.is_ok() + || hashes_per_tick.is_some() + || remove_stake_accounts + || !accounts_to_remove.is_empty() + || !feature_gates_to_deactivate.is_empty() + || !vote_accounts_to_destake.is_empty() + || faucet_pubkey.is_some() + || bootstrap_validator_pubkeys.is_some(); + + if child_bank_required { + let mut child_bank = Bank::new_from_parent( + bank.clone(), + bank.collector_id(), + bank.slot() + 1, + ); + + if let Ok(rent_burn_percentage) = rent_burn_percentage { + child_bank.set_rent_burn_percentage(rent_burn_percentage); + } + + if let Some(hashes_per_tick) = hashes_per_tick { + child_bank.set_hashes_per_tick(match hashes_per_tick { + // Note: Unlike `solana-genesis`, "auto" is not supported here. + "sleep" => None, + _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), + }); + } + bank = Arc::new(child_bank); } - if let Some(hashes_per_tick) = hashes_per_tick { - child_bank.set_hashes_per_tick(match hashes_per_tick { - // Note: Unlike `solana-genesis`, "auto" is not supported here. - "sleep" => None, - _ => Some(value_t_or_exit!(arg_matches, "hashes_per_tick", u64)), - }); + if let Some(faucet_pubkey) = faucet_pubkey { + bank.store_account( + &faucet_pubkey, + &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), + ); } - bank = Arc::new(child_bank); - } - if let Some(faucet_pubkey) = faucet_pubkey { - bank.store_account( - &faucet_pubkey, - &AccountSharedData::new(faucet_lamports, 0, &system_program::id()), - ); - } + if remove_stake_accounts { + for (address, mut account) in bank + .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .unwrap() + .into_iter() + { + account.set_lamports(0); + bank.store_account(&address, &account); + } + } + + for address in accounts_to_remove { + let mut account = bank.get_account(&address).unwrap_or_else(|| { + eprintln!( + "Error: Account does not exist, unable to remove it: {address}" + ); + exit(1); + }); - if remove_stake_accounts { - for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { account.set_lamports(0); bank.store_account(&address, &account); + debug!("Account removed: {address}"); } - } - for address in accounts_to_remove { - let mut account = bank.get_account(&address).unwrap_or_else(|| { - eprintln!("Error: Account does not exist, unable to remove it: {address}"); - exit(1); - }); - - account.set_lamports(0); - bank.store_account(&address, &account); - debug!("Account removed: {address}"); - } - - for address in feature_gates_to_deactivate { - let mut account = bank.get_account(&address).unwrap_or_else(|| { - eprintln!( - "Error: Feature-gate account does not exist, unable to \ + for address in feature_gates_to_deactivate { + let mut account = 
bank.get_account(&address).unwrap_or_else(|| { + eprintln!( + "Error: Feature-gate account does not exist, unable to \ deactivate it: {address}" - ); - exit(1); - }); + ); + exit(1); + }); - match feature::from_account(&account) { - Some(feature) => { - if feature.activated_at.is_none() { - warn!("Feature gate is not yet activated: {address}"); + match feature::from_account(&account) { + Some(feature) => { + if feature.activated_at.is_none() { + warn!("Feature gate is not yet activated: {address}"); + } + } + None => { + eprintln!("Error: Account is not a `Feature`: {address}"); + exit(1); } } - None => { - eprintln!("Error: Account is not a `Feature`: {address}"); - exit(1); - } - } - account.set_lamports(0); - bank.store_account(&address, &account); - debug!("Feature gate deactivated: {address}"); - } + account.set_lamports(0); + bank.store_account(&address, &account); + debug!("Feature gate deactivated: {address}"); + } - if !vote_accounts_to_destake.is_empty() { - for (address, mut account) in bank - .get_program_accounts(&stake::program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { - if let Ok(StakeStateV2::Stake(meta, stake, _)) = account.state() { - if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) { - if verbose_level > 0 { - warn!( - "Undelegating stake account {} from {}", - address, stake.delegation.voter_pubkey, - ); + if !vote_accounts_to_destake.is_empty() { + for (address, mut account) in bank + .get_program_accounts(&stake::program::id(), &ScanConfig::default()) + .unwrap() + .into_iter() + { + if let Ok(StakeStateV2::Stake(meta, stake, _)) = account.state() { + if vote_accounts_to_destake.contains(&stake.delegation.voter_pubkey) + { + if verbose_level > 0 { + warn!( + "Undelegating stake account {} from {}", + address, stake.delegation.voter_pubkey, + ); + } + account.set_state(&StakeStateV2::Initialized(meta)).unwrap(); + bank.store_account(&address, &account); } - account.set_state(&StakeStateV2::Initialized(meta)).unwrap(); - bank.store_account(&address, &account); } } } - } - if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys { - assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0); + if let Some(bootstrap_validator_pubkeys) = bootstrap_validator_pubkeys { + assert_eq!(bootstrap_validator_pubkeys.len() % 3, 0); - // Ensure there are no duplicated pubkeys in the --bootstrap-validator list - { - let mut v = bootstrap_validator_pubkeys.clone(); - v.sort(); - v.dedup(); - if v.len() != bootstrap_validator_pubkeys.len() { - eprintln!("Error: --bootstrap-validator pubkeys cannot be duplicated"); - exit(1); + // Ensure there are no duplicated pubkeys in the --bootstrap-validator list + { + let mut v = bootstrap_validator_pubkeys.clone(); + v.sort(); + v.dedup(); + if v.len() != bootstrap_validator_pubkeys.len() { + eprintln!( + "Error: --bootstrap-validator pubkeys cannot be duplicated" + ); + exit(1); + } } - } - // Delete existing vote accounts - for (address, mut account) in bank - .get_program_accounts(&solana_vote_program::id(), &ScanConfig::default()) - .unwrap() - .into_iter() - { - account.set_lamports(0); - bank.store_account(&address, &account); - } - - // Add a new identity/vote/stake account for each of the provided bootstrap - // validators - let mut bootstrap_validator_pubkeys_iter = bootstrap_validator_pubkeys.iter(); - loop { - let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() else { - break; - }; - let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); - let stake_pubkey = 
bootstrap_validator_pubkeys_iter.next().unwrap(); + // Delete existing vote accounts + for (address, mut account) in bank + .get_program_accounts( + &solana_vote_program::id(), + &ScanConfig::default(), + ) + .unwrap() + .into_iter() + { + account.set_lamports(0); + bank.store_account(&address, &account); + } - bank.store_account( - identity_pubkey, - &AccountSharedData::new( - bootstrap_validator_lamports, - 0, - &system_program::id(), - ), - ); + // Add a new identity/vote/stake account for each of the provided bootstrap + // validators + let mut bootstrap_validator_pubkeys_iter = + bootstrap_validator_pubkeys.iter(); + loop { + let Some(identity_pubkey) = bootstrap_validator_pubkeys_iter.next() + else { + break; + }; + let vote_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); + let stake_pubkey = bootstrap_validator_pubkeys_iter.next().unwrap(); + + bank.store_account( + identity_pubkey, + &AccountSharedData::new( + bootstrap_validator_lamports, + 0, + &system_program::id(), + ), + ); - let vote_account = vote_state::create_account_with_authorized( - identity_pubkey, - identity_pubkey, - identity_pubkey, - 100, - VoteState::get_rent_exempt_reserve(&rent).max(1), - ); + let vote_account = vote_state::create_account_with_authorized( + identity_pubkey, + identity_pubkey, + identity_pubkey, + 100, + VoteState::get_rent_exempt_reserve(&rent).max(1), + ); - bank.store_account( - stake_pubkey, - &stake_state::create_account( - bootstrap_stake_authorized_pubkey - .as_ref() - .unwrap_or(identity_pubkey), - vote_pubkey, - &vote_account, - &rent, - bootstrap_validator_stake_lamports, - ), - ); - bank.store_account(vote_pubkey, &vote_account); - } + bank.store_account( + stake_pubkey, + &stake_state::create_account( + bootstrap_stake_authorized_pubkey + .as_ref() + .unwrap_or(identity_pubkey), + vote_pubkey, + &vote_account, + &rent, + bootstrap_validator_stake_lamports, + ), + ); + bank.store_account(vote_pubkey, &vote_account); + } - // Warp ahead at least two epochs to ensure that the leader schedule will be - // updated to reflect the new bootstrap validator(s) - let minimum_warp_slot = genesis_config.epoch_schedule.get_first_slot_in_epoch( - genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2, - ); + // Warp ahead at least two epochs to ensure that the leader schedule will be + // updated to reflect the new bootstrap validator(s) + let minimum_warp_slot = + genesis_config.epoch_schedule.get_first_slot_in_epoch( + genesis_config.epoch_schedule.get_epoch(snapshot_slot) + 2, + ); - if let Some(warp_slot) = warp_slot { - if warp_slot < minimum_warp_slot { - eprintln!( - "Error: --warp-slot too close. Must be >= \ + if let Some(warp_slot) = warp_slot { + if warp_slot < minimum_warp_slot { + eprintln!( + "Error: --warp-slot too close. 
Must be >= \ {minimum_warp_slot}" - ); - exit(1); + ); + exit(1); + } + } else { + warn!("Warping to slot {}", minimum_warp_slot); + warp_slot = Some(minimum_warp_slot); } - } else { - warn!("Warping to slot {}", minimum_warp_slot); - warp_slot = Some(minimum_warp_slot); } - } - if child_bank_required { - while !bank.is_complete() { - bank.register_unique_tick(); + if child_bank_required { + while !bank.is_complete() { + bank.register_unique_tick(); + } } - } - - bank.set_capitalization(); - - let bank = if let Some(warp_slot) = warp_slot { - // need to flush the write cache in order to use Storages to calculate - // the accounts hash, and need to root `bank` before flushing the cache - bank.rc.accounts.accounts_db.add_root(bank.slot()); - bank.force_flush_accounts_cache(); - Arc::new(Bank::warp_from_parent( - bank.clone(), - bank.collector_id(), - warp_slot, - CalcAccountsHashDataSource::Storages, - )) - } else { - bank - }; - let minimize_snapshot_possibly_incomplete = if is_minimized { - minimize_bank_for_snapshot( - &blockstore, - &bank, - snapshot_slot, - ending_slot.unwrap(), - ) - } else { - false - }; - - println!( - "Creating a version {} {}snapshot of slot {}", - snapshot_version, - snapshot_type_str, - bank.slot(), - ); - - if is_incremental { - if starting_snapshot_hashes.is_none() { - eprintln!( - "Unable to create incremental snapshot without a base full \ - snapshot" - ); - exit(1); - } - let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.0 .0; - if bank.slot() <= full_snapshot_slot { - eprintln!( - "Unable to create incremental snapshot: Slot must be greater \ - than full snapshot slot. slot: {}, full snapshot slot: {}", - bank.slot(), - full_snapshot_slot, - ); - exit(1); - } + bank.set_capitalization(); + + let bank = if let Some(warp_slot) = warp_slot { + // need to flush the write cache in order to use Storages to calculate + // the accounts hash, and need to root `bank` before flushing the cache + bank.rc.accounts.accounts_db.add_root(bank.slot()); + bank.force_flush_accounts_cache(); + Arc::new(Bank::warp_from_parent( + bank.clone(), + bank.collector_id(), + warp_slot, + CalcAccountsHashDataSource::Storages, + )) + } else { + bank + }; - let incremental_snapshot_archive_info = - snapshot_bank_utils::bank_to_incremental_snapshot_archive( - ledger_path, + let minimize_snapshot_possibly_incomplete = if is_minimized { + minimize_bank_for_snapshot( + &blockstore, &bank, - full_snapshot_slot, - Some(snapshot_version), - output_directory.clone(), - output_directory, - snapshot_archive_format, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, + snapshot_slot, + ending_slot.unwrap(), ) - .unwrap_or_else(|err| { - eprintln!("Unable to create incremental snapshot: {err}"); - exit(1); - }); + } else { + false + }; println!( - "Successfully created incremental snapshot for slot {}, hash {}, \ - base slot: {}: {}", + "Creating a version {} {}snapshot of slot {}", + snapshot_version, + snapshot_type_str, bank.slot(), - bank.hash(), - full_snapshot_slot, - incremental_snapshot_archive_info.path().display(), ); - } else { - let full_snapshot_archive_info = - snapshot_bank_utils::bank_to_full_snapshot_archive( - ledger_path, - &bank, - Some(snapshot_version), - output_directory.clone(), - output_directory, - snapshot_archive_format, - maximum_full_snapshot_archives_to_retain, - maximum_incremental_snapshot_archives_to_retain, - ) - .unwrap_or_else(|err| { - eprintln!("Unable to create snapshot: {err}"); + + if is_incremental { + 
if starting_snapshot_hashes.is_none() { + eprintln!( + "Unable to create incremental snapshot without a base full \ + snapshot" + ); exit(1); - }); + } + let full_snapshot_slot = starting_snapshot_hashes.unwrap().full.0 .0; + if bank.slot() <= full_snapshot_slot { + eprintln!( + "Unable to create incremental snapshot: Slot must be greater \ + than full snapshot slot. slot: {}, full snapshot slot: {}", + bank.slot(), + full_snapshot_slot, + ); + exit(1); + } - println!( - "Successfully created snapshot for slot {}, hash {}: {}", - bank.slot(), - bank.hash(), - full_snapshot_archive_info.path().display(), - ); + let incremental_snapshot_archive_info = + snapshot_bank_utils::bank_to_incremental_snapshot_archive( + ledger_path, + &bank, + full_snapshot_slot, + Some(snapshot_version), + output_directory.clone(), + output_directory, + snapshot_archive_format, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap_or_else(|err| { + eprintln!("Unable to create incremental snapshot: {err}"); + exit(1); + }); - if is_minimized { - let starting_epoch = bank.epoch_schedule().get_epoch(snapshot_slot); - let ending_epoch = bank.epoch_schedule().get_epoch(ending_slot.unwrap()); - if starting_epoch != ending_epoch { - warn!( - "Minimized snapshot range crosses epoch boundary ({} to \ + println!( + "Successfully created incremental snapshot for slot {}, hash {}, \ + base slot: {}: {}", + bank.slot(), + bank.hash(), + full_snapshot_slot, + incremental_snapshot_archive_info.path().display(), + ); + } else { + let full_snapshot_archive_info = + snapshot_bank_utils::bank_to_full_snapshot_archive( + ledger_path, + &bank, + Some(snapshot_version), + output_directory.clone(), + output_directory, + snapshot_archive_format, + maximum_full_snapshot_archives_to_retain, + maximum_incremental_snapshot_archives_to_retain, + ) + .unwrap_or_else(|err| { + eprintln!("Unable to create snapshot: {err}"); + exit(1); + }); + + println!( + "Successfully created snapshot for slot {}, hash {}: {}", + bank.slot(), + bank.hash(), + full_snapshot_archive_info.path().display(), + ); + + if is_minimized { + let starting_epoch = bank.epoch_schedule().get_epoch(snapshot_slot); + let ending_epoch = + bank.epoch_schedule().get_epoch(ending_slot.unwrap()); + if starting_epoch != ending_epoch { + warn!( + "Minimized snapshot range crosses epoch boundary ({} to \ {}). Bank hashes after {} will not match replays from a \ full snapshot", - starting_epoch, - ending_epoch, - bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch) - ); - } + starting_epoch, + ending_epoch, + bank.epoch_schedule().get_last_slot_in_epoch(starting_epoch) + ); + } - if minimize_snapshot_possibly_incomplete { - warn!( - "Minimized snapshot may be incomplete due to missing \ + if minimize_snapshot_possibly_incomplete { + warn!( + "Minimized snapshot may be incomplete due to missing \ accounts from CPI'd address lookup table extensions. \ This may lead to mismatched bank hashes while replaying." 
- ); + ); + } } } - } - - println!( - "Shred version: {}", - compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())) - ); - } - ("accounts", Some(arg_matches)) => { - let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot, - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - use_snapshot_archives_at_startup: value_t_or_exit!( - arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let include_sysvars = arg_matches.is_present("include_sysvars"); - let blockstore = - open_blockstore(&ledger_path, arg_matches, get_access_type(&process_options)); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); - let bank = bank_forks.read().unwrap().working_bank(); - let mut serializer = serde_json::Serializer::new(stdout()); - let (summarize, mut json_serializer) = - match OutputFormat::from_matches(arg_matches, "output_format", false) { - OutputFormat::Json | OutputFormat::JsonCompact => { - (false, Some(serializer.serialize_seq(None).unwrap())) - } - _ => (true, None), + println!( + "Shred version: {}", + compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())) + ); + } + ("accounts", Some(arg_matches)) => { + let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot, + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() }; - let mut total_accounts_stats = TotalAccountsStats::default(); - let rent_collector = bank.rent_collector(); - let print_account_contents = !arg_matches.is_present("no_account_contents"); - let print_account_data = !arg_matches.is_present("no_account_data"); - let data_encoding = parse_encoding_format(arg_matches); - let cli_account_new_config = CliAccountNewConfig { - data_encoding, - ..CliAccountNewConfig::default() - }; - let scan_func = |some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>| { - if let Some((pubkey, account, slot)) = some_account_tuple - .filter(|(_, account, _)| Accounts::is_loadable(account.lamports())) - { - if !include_sysvars && solana_sdk::sysvar::is_sysvar_id(pubkey) { - return; - } + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let include_sysvars = arg_matches.is_present("include_sysvars"); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); - total_accounts_stats.accumulate_account(pubkey, &account, rent_collector); + let bank = bank_forks.read().unwrap().working_bank(); + let mut serializer = serde_json::Serializer::new(stdout()); + let (summarize, mut json_serializer) = + match 
OutputFormat::from_matches(arg_matches, "output_format", false) { + OutputFormat::Json | OutputFormat::JsonCompact => { + (false, Some(serializer.serialize_seq(None).unwrap())) + } + _ => (true, None), + }; + let mut total_accounts_stats = TotalAccountsStats::default(); + let rent_collector = bank.rent_collector(); + let print_account_contents = !arg_matches.is_present("no_account_contents"); + let print_account_data = !arg_matches.is_present("no_account_data"); + let data_encoding = parse_encoding_format(arg_matches); + let cli_account_new_config = CliAccountNewConfig { + data_encoding, + ..CliAccountNewConfig::default() + }; + let scan_func = + |some_account_tuple: Option<(&Pubkey, AccountSharedData, Slot)>| { + if let Some((pubkey, account, slot)) = some_account_tuple + .filter(|(_, account, _)| Accounts::is_loadable(account.lamports())) + { + if !include_sysvars && solana_sdk::sysvar::is_sysvar_id(pubkey) { + return; + } - if print_account_contents { - if let Some(json_serializer) = json_serializer.as_mut() { - let cli_account = CliAccount::new_with_config( - pubkey, - &account, - &cli_account_new_config, - ); - json_serializer.serialize_element(&cli_account).unwrap(); - } else { - output_account( + total_accounts_stats.accumulate_account( pubkey, &account, - Some(slot), - print_account_data, - data_encoding, + rent_collector, ); + + if print_account_contents { + if let Some(json_serializer) = json_serializer.as_mut() { + let cli_account = CliAccount::new_with_config( + pubkey, + &account, + &cli_account_new_config, + ); + json_serializer.serialize_element(&cli_account).unwrap(); + } else { + output_account( + pubkey, + &account, + Some(slot), + print_account_data, + data_encoding, + ); + } + } } - } + }; + let mut measure = Measure::start("scanning accounts"); + bank.scan_all_accounts(scan_func).unwrap(); + measure.stop(); + info!("{}", measure); + if let Some(json_serializer) = json_serializer { + json_serializer.end().unwrap(); } - }; - let mut measure = Measure::start("scanning accounts"); - bank.scan_all_accounts(scan_func).unwrap(); - measure.stop(); - info!("{}", measure); - if let Some(json_serializer) = json_serializer { - json_serializer.end().unwrap(); - } - if summarize { - println!("\n{total_accounts_stats:#?}"); - } - } - ("capitalization", Some(arg_matches)) => { - let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); - let process_options = ProcessOptions { - new_hard_forks: hardforks_of(arg_matches, "hard_forks"), - halt_at_slot, - run_verification: false, - accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), - use_snapshot_archives_at_startup: value_t_or_exit!( - arg_matches, - use_snapshot_archives_at_startup::cli::NAME, - UseSnapshotArchivesAtStartup - ), - ..ProcessOptions::default() - }; - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); - let blockstore = - open_blockstore(&ledger_path, arg_matches, get_access_type(&process_options)); - let (bank_forks, _) = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - Arc::new(blockstore), - process_options, - snapshot_archive_path, - incremental_snapshot_archive_path, - ); - let bank_forks = bank_forks.read().unwrap(); - let slot = bank_forks.working_bank().slot(); - let bank = bank_forks.get(slot).unwrap_or_else(|| { - eprintln!("Error: Slot {slot} is not available"); - exit(1); - }); - - if arg_matches.is_present("recalculate_capitalization") { - println!("Recalculating capitalization"); - let old_capitalization = bank.set_capitalization(); 
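+ // set_capitalization() recomputes the total lamport supply from the
+ // accounts store, records it on the bank, and returns the previously
+ // recorded value, so the comparison that follows only reports when the
+ // recalculated total already matched. Illustrative sketch (not part of
+ // this change):
+ //     let old = bank.set_capitalization();
+ //     if old == bank.capitalization() { /* nothing drifted */ }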
- if old_capitalization == bank.capitalization() { - eprintln!("Capitalization was identical: {}", Sol(old_capitalization)); + if summarize { + println!("\n{total_accounts_stats:#?}"); } } - - if arg_matches.is_present("warp_epoch") { - let base_bank = bank; - - let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap(); - let warp_epoch = if raw_warp_epoch.starts_with('+') { - base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() - } else { - value_t!(arg_matches, "warp_epoch", Epoch).unwrap() + ("capitalization", Some(arg_matches)) => { + let halt_at_slot = value_t!(arg_matches, "halt_at_slot", Slot).ok(); + let process_options = ProcessOptions { + new_hard_forks: hardforks_of(arg_matches, "hard_forks"), + halt_at_slot, + run_verification: false, + accounts_db_config: Some(get_accounts_db_config(&ledger_path, arg_matches)), + use_snapshot_archives_at_startup: value_t_or_exit!( + arg_matches, + use_snapshot_archives_at_startup::cli::NAME, + UseSnapshotArchivesAtStartup + ), + ..ProcessOptions::default() }; - if warp_epoch < base_bank.epoch() { - eprintln!( - "Error: can't warp epoch backwards: {} => {}", - base_bank.epoch(), - warp_epoch - ); + let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + get_access_type(&process_options), + ); + let (bank_forks, _) = load_and_process_ledger_or_exit( + arg_matches, + &genesis_config, + Arc::new(blockstore), + process_options, + snapshot_archive_path, + incremental_snapshot_archive_path, + ); + let bank_forks = bank_forks.read().unwrap(); + let slot = bank_forks.working_bank().slot(); + let bank = bank_forks.get(slot).unwrap_or_else(|| { + eprintln!("Error: Slot {slot} is not available"); exit(1); - } + }); - if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) { - let inflation = match raw_inflation.as_str() { - "pico" => Inflation::pico(), - "full" => Inflation::full(), - "none" => Inflation::new_disabled(), - _ => unreachable!(), - }; - println!( - "Forcing to: {:?} (was: {:?})", - inflation, - base_bank.inflation() - ); - base_bank.set_inflation(inflation); + if arg_matches.is_present("recalculate_capitalization") { + println!("Recalculating capitalization"); + let old_capitalization = bank.set_capitalization(); + if old_capitalization == bank.capitalization() { + eprintln!("Capitalization was identical: {}", Sol(old_capitalization)); + } } - let next_epoch = base_bank - .epoch_schedule() - .get_first_slot_in_epoch(warp_epoch); - // disable eager rent collection because this creates many unrelated - // rent collection account updates - base_bank - .lazy_rent_collection - .store(true, std::sync::atomic::Ordering::Relaxed); - - let feature_account_balance = - std::cmp::max(genesis_config.rent.minimum_balance(Feature::size_of()), 1); - if arg_matches.is_present("enable_credits_auto_rewind") { - base_bank.unfreeze_for_ledger_tool(); - let mut force_enabled_count = 0; - if base_bank - .get_account(&feature_set::credits_auto_rewind::id()) - .is_none() - { - base_bank.store_account( - &feature_set::credits_auto_rewind::id(), - &feature::create_account( - &Feature { activated_at: None }, - feature_account_balance, - ), + if arg_matches.is_present("warp_epoch") { + let base_bank = bank; + + let raw_warp_epoch = value_t!(arg_matches, "warp_epoch", String).unwrap(); + let warp_epoch = if raw_warp_epoch.starts_with('+') { + base_bank.epoch() + value_t!(arg_matches, "warp_epoch", Epoch).unwrap() + } else { + 
value_t!(arg_matches, "warp_epoch", Epoch).unwrap()
+ };
+ if warp_epoch < base_bank.epoch() {
+ eprintln!(
+ "Error: can't warp epoch backwards: {} => {}",
+ base_bank.epoch(),
+ warp_epoch
 );
- force_enabled_count += 1;
+ exit(1);
 }
- if force_enabled_count == 0 {
- warn!("credits_auto_rewind is already activated (or scheduled)");
+
+ if let Ok(raw_inflation) = value_t!(arg_matches, "inflation", String) {
+ let inflation = match raw_inflation.as_str() {
+ "pico" => Inflation::pico(),
+ "full" => Inflation::full(),
+ "none" => Inflation::new_disabled(),
+ _ => unreachable!(),
+ };
+ println!(
+ "Forcing to: {:?} (was: {:?})",
+ inflation,
+ base_bank.inflation()
+ );
+ base_bank.set_inflation(inflation);
 }
- let mut store_failed_count = 0;
- if force_enabled_count >= 1 {
+
+ let next_epoch = base_bank
+ .epoch_schedule()
+ .get_first_slot_in_epoch(warp_epoch);
+ // disable eager rent collection because this creates many unrelated
+ // rent collection account updates
+ base_bank
+ .lazy_rent_collection
+ .store(true, std::sync::atomic::Ordering::Relaxed);
+
+ let feature_account_balance = std::cmp::max(
+ genesis_config.rent.minimum_balance(Feature::size_of()),
+ 1,
+ );
+ if arg_matches.is_present("enable_credits_auto_rewind") {
+ base_bank.unfreeze_for_ledger_tool();
+ let mut force_enabled_count = 0;
 if base_bank
- .get_account(&feature_set::deprecate_rewards_sysvar::id())
- .is_some()
+ .get_account(&feature_set::credits_auto_rewind::id())
+ .is_none()
 {
- // steal some lamports from the pretty old feature not to affect
- // capitalization, which doesn't affect inflation behavior!
 base_bank.store_account(
- &feature_set::deprecate_rewards_sysvar::id(),
- &AccountSharedData::default(),
+ &feature_set::credits_auto_rewind::id(),
+ &feature::create_account(
+ &Feature { activated_at: None },
+ feature_account_balance,
+ ),
 );
- force_enabled_count -= 1;
- } else {
- store_failed_count += 1;
+ force_enabled_count += 1;
 }
- }
- assert_eq!(force_enabled_count, store_failed_count);
- if store_failed_count >= 1 {
- // we have no choice; maybe this is a locally created blank cluster
- // with a non-Development cluster type.
- let old_cap = base_bank.set_capitalization();
- let new_cap = base_bank.capitalization();
- warn!(
- "Skewing capitalization a bit to enable \
+ if force_enabled_count == 0 {
+ warn!("credits_auto_rewind is already activated (or scheduled)");
+ }
+ let mut store_failed_count = 0;
+ if force_enabled_count >= 1 {
+ if base_bank
+ .get_account(&feature_set::deprecate_rewards_sysvar::id())
+ .is_some()
+ {
+ // steal some lamports from the pretty old feature not to affect
+ // capitalization, which doesn't affect inflation behavior!
+ base_bank.store_account(
+ &feature_set::deprecate_rewards_sysvar::id(),
+ &AccountSharedData::default(),
+ );
+ force_enabled_count -= 1;
+ } else {
+ store_failed_count += 1;
+ }
+ }
+ assert_eq!(force_enabled_count, store_failed_count);
+ if store_failed_count >= 1 {
+ // we have no choice; maybe this is a locally created blank cluster
+ // with a non-Development cluster type.
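+ // Worked example of the skew asserted below: storing the feature
+ // account mints feature_account_balance new lamports that are not
+ // debited anywhere, so the recalculated capitalization should exceed
+ // the old one by feature_account_balance * store_failed_count (e.g.
+ // a balance of 1_000 and one failed store gives new_cap == old_cap + 1_000).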
+ let old_cap = base_bank.set_capitalization();
+ let new_cap = base_bank.capitalization();
+ warn!(
+ "Skewing capitalization a bit to enable \
 credits_auto_rewind as requested: increasing {} from {} \
 to {}",
- feature_account_balance, old_cap, new_cap,
- );
- assert_eq!(
- old_cap + feature_account_balance * store_failed_count,
- new_cap
- );
+ feature_account_balance, old_cap, new_cap,
+ );
+ assert_eq!(
+ old_cap + feature_account_balance * store_failed_count,
+ new_cap
+ );
+ }
 }

- #[derive(Default, Debug)]
- struct PointDetail {
- epoch: Epoch,
- points: u128,
- stake: u128,
- credits: u128,
- }
+ #[derive(Default, Debug)]
+ struct PointDetail {
+ epoch: Epoch,
+ points: u128,
+ stake: u128,
+ credits: u128,
+ }

- #[derive(Default, Debug)]
- struct CalculationDetail {
- epochs: usize,
- voter: Pubkey,
- voter_owner: Pubkey,
- current_effective_stake: u64,
- total_stake: u64,
- rent_exempt_reserve: u64,
- points: Vec<PointDetail>,
- base_rewards: u64,
- commission: u8,
- vote_rewards: u64,
- stake_rewards: u64,
- activation_epoch: Epoch,
- deactivation_epoch: Option<Epoch>,
- point_value: Option<PointValue>,
- old_credits_observed: Option<u64>,
- new_credits_observed: Option<u64>,
- skipped_reasons: String,
- }
- use solana_stake_program::stake_state::InflationPointCalculationEvent;
- let stake_calculation_details: DashMap<Pubkey, CalculationDetail> =
- DashMap::new();
- let last_point_value = Arc::new(RwLock::new(None));
- let tracer = |event: &RewardCalculationEvent| {
- // Currently the RewardCalculationEvent enum has only the Staking variant
- // because only staking tracing is supported!
- #[allow(irrefutable_let_patterns)]
- if let RewardCalculationEvent::Staking(pubkey, event) = event {
- let mut detail = stake_calculation_details.entry(**pubkey).or_default();
- match event {
+ #[derive(Default, Debug)]
+ struct CalculationDetail {
+ epochs: usize,
+ voter: Pubkey,
+ voter_owner: Pubkey,
+ current_effective_stake: u64,
+ total_stake: u64,
+ rent_exempt_reserve: u64,
+ points: Vec<PointDetail>,
+ base_rewards: u64,
+ commission: u8,
+ vote_rewards: u64,
+ stake_rewards: u64,
+ activation_epoch: Epoch,
+ deactivation_epoch: Option<Epoch>,
+ point_value: Option<PointValue>,
+ old_credits_observed: Option<u64>,
+ new_credits_observed: Option<u64>,
+ skipped_reasons: String,
+ }
+ use solana_stake_program::stake_state::InflationPointCalculationEvent;
+ let stake_calculation_details: DashMap<Pubkey, CalculationDetail> =
+ DashMap::new();
+ let last_point_value = Arc::new(RwLock::new(None));
+ let tracer = |event: &RewardCalculationEvent| {
+ // Currently the RewardCalculationEvent enum has only the Staking variant
+ // because only staking tracing is supported!
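+ // With a single-variant enum, the `if let` below always matches and
+ // would otherwise trip rustc's irrefutable_let_patterns lint, hence
+ // the allow attribute until more variants exist.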
+ #[allow(irrefutable_let_patterns)]
+ if let RewardCalculationEvent::Staking(pubkey, event) = event {
+ let mut detail =
+ stake_calculation_details.entry(**pubkey).or_default();
+ match event {
 InflationPointCalculationEvent::CalculatedPoints(
 epoch,
 stake,
@@ -3319,285 +3373,304 @@ fn main() {
 }
 }
 }
- }
- };
- let warped_bank = Bank::new_from_parent_with_tracer(
- base_bank.clone(),
- base_bank.collector_id(),
- next_epoch,
- tracer,
- );
- warped_bank.freeze();
- let mut csv_writer = if arg_matches.is_present("csv_filename") {
- let csv_filename = value_t_or_exit!(arg_matches, "csv_filename", String);
- let file = File::create(csv_filename).unwrap();
- Some(csv::WriterBuilder::new().from_writer(file))
- } else {
- None
- };
-
- println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot());
- println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch());
- assert_capitalization(&base_bank);
- assert_capitalization(&warped_bank);
- let interest_per_epoch = ((warped_bank.capitalization() as f64)
- / (base_bank.capitalization() as f64)
- * 100_f64)
- - 100_f64;
- let interest_per_year =
- interest_per_epoch / warped_bank.epoch_duration_in_years(base_bank.epoch());
- println!(
- "Capitalization: {} => {} (+{} {}%; annualized {}%)",
- Sol(base_bank.capitalization()),
- Sol(warped_bank.capitalization()),
- Sol(warped_bank.capitalization() - base_bank.capitalization()),
- interest_per_epoch,
- interest_per_year,
- );
+ }
+ };
+ let warped_bank = Bank::new_from_parent_with_tracer(
+ base_bank.clone(),
+ base_bank.collector_id(),
+ next_epoch,
+ tracer,
+ );
+ warped_bank.freeze();
+ let mut csv_writer = if arg_matches.is_present("csv_filename") {
+ let csv_filename =
+ value_t_or_exit!(arg_matches, "csv_filename", String);
+ let file = File::create(csv_filename).unwrap();
+ Some(csv::WriterBuilder::new().from_writer(file))
+ } else {
+ None
+ };

- let mut overall_delta = 0;
+ println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot());
+ println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch());
+ assert_capitalization(&base_bank);
+ assert_capitalization(&warped_bank);
+ let interest_per_epoch = ((warped_bank.capitalization() as f64)
+ / (base_bank.capitalization() as f64)
+ * 100_f64)
+ - 100_f64;
+ let interest_per_year = interest_per_epoch
+ / warped_bank.epoch_duration_in_years(base_bank.epoch());
+ println!(
+ "Capitalization: {} => {} (+{} {}%; annualized {}%)",
+ Sol(base_bank.capitalization()),
+ Sol(warped_bank.capitalization()),
+ Sol(warped_bank.capitalization() - base_bank.capitalization()),
+ interest_per_epoch,
+ interest_per_year,
+ );

- let modified_accounts = warped_bank.get_all_accounts_modified_since_parent();
- let mut rewarded_accounts = modified_accounts
- .iter()
- .map(|(pubkey, account)| {
- (
- pubkey,
- account,
- base_bank
- .get_account(pubkey)
- .map(|a| a.lamports())
- .unwrap_or_default(),
- )
- })
- .collect::<Vec<_>>();
- rewarded_accounts.sort_unstable_by_key(|(pubkey, account, base_lamports)| {
- (
- *account.owner(),
- *base_lamports,
- account.lamports() - base_lamports,
- *pubkey,
- )
- });

+ let mut overall_delta = 0;

- let mut unchanged_accounts = stake_calculation_details
- .iter()
- .map(|entry| *entry.key())
- .collect::<HashSet<_>>()
- .difference(
- &rewarded_accounts
- .iter()
- .map(|(pubkey, ..)| **pubkey)
- .collect(),
- )
- .map(|pubkey| (*pubkey, warped_bank.get_account(pubkey).unwrap()))
- .collect::<Vec<_>>();
- unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| {
- (*account.owner(), account.lamports(), *pubkey)
- });
- let 
unchanged_accounts = unchanged_accounts.into_iter();
+ let modified_accounts =
+ warped_bank.get_all_accounts_modified_since_parent();
+ let mut rewarded_accounts = modified_accounts
+ .iter()
+ .map(|(pubkey, account)| {
+ (
+ pubkey,
+ account,
+ base_bank
+ .get_account(pubkey)
+ .map(|a| a.lamports())
+ .unwrap_or_default(),
+ )
+ })
+ .collect::<Vec<_>>();
+ rewarded_accounts.sort_unstable_by_key(
+ |(pubkey, account, base_lamports)| {
+ (
+ *account.owner(),
+ *base_lamports,
+ account.lamports() - base_lamports,
+ *pubkey,
+ )
+ },
+ );

- let rewarded_accounts = rewarded_accounts
- .into_iter()
- .map(|(pubkey, account, ..)| (*pubkey, account.clone()));
-
- let all_accounts = unchanged_accounts.chain(rewarded_accounts);
- for (pubkey, warped_account) in all_accounts {
- // Don't output sysvars; they're always updated but not related to
- // inflation.
- if solana_sdk::sysvar::is_sysvar_id(&pubkey) {
- continue;
- }
+ let mut unchanged_accounts = stake_calculation_details
+ .iter()
+ .map(|entry| *entry.key())
+ .collect::<HashSet<_>>()
+ .difference(
+ &rewarded_accounts
+ .iter()
+ .map(|(pubkey, ..)| **pubkey)
+ .collect(),
+ )
+ .map(|pubkey| (*pubkey, warped_bank.get_account(pubkey).unwrap()))
+ .collect::<Vec<_>>();
+ unchanged_accounts.sort_unstable_by_key(|(pubkey, account)| {
+ (*account.owner(), account.lamports(), *pubkey)
+ });
+ let unchanged_accounts = unchanged_accounts.into_iter();
+
+ let rewarded_accounts = rewarded_accounts
+ .into_iter()
+ .map(|(pubkey, account, ..)| (*pubkey, account.clone()));
+
+ let all_accounts = unchanged_accounts.chain(rewarded_accounts);
+ for (pubkey, warped_account) in all_accounts {
+ // Don't output sysvars; they're always updated but not related to
+ // inflation.
+ if solana_sdk::sysvar::is_sysvar_id(&pubkey) {
+ continue;
+ }

- if let Some(base_account) = base_bank.get_account(&pubkey) {
- let delta = warped_account.lamports() - base_account.lamports();
- let detail_ref = stake_calculation_details.get(&pubkey);
- let detail: Option<&CalculationDetail> =
- detail_ref.as_ref().map(|detail_ref| detail_ref.value());
- println!(
- "{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}",
- format!("{pubkey}"), // format! is needed to pad/justify correctly.
- base_account.owner(),
- Sol(base_account.lamports()),
- Sol(warped_account.lamports()),
- Sol(delta),
- ((warped_account.lamports() as f64)
- / (base_account.lamports() as f64)
- * 100_f64)
- - 100_f64,
- detail,
- );
- if let Some(ref mut csv_writer) = csv_writer {
- #[derive(Serialize)]
- struct InflationRecord {
- cluster_type: String,
- rewarded_epoch: Epoch,
- account: String,
- owner: String,
- old_balance: u64,
- new_balance: u64,
- data_size: usize,
- delegation: String,
- delegation_owner: String,
- effective_stake: String,
- delegated_stake: String,
- rent_exempt_reserve: String,
- activation_epoch: String,
- deactivation_epoch: String,
- earned_epochs: String,
- epoch: String,
- epoch_credits: String,
- epoch_points: String,
- epoch_stake: String,
- old_credits_observed: String,
- new_credits_observed: String,
- base_rewards: String,
- stake_rewards: String,
- vote_rewards: String,
- commission: String,
- cluster_rewards: String,
- cluster_points: String,
- old_capitalization: u64,
- new_capitalization: u64,
- }
- fn format_or_na<T: fmt::Display>(data: Option<T>) -> String {
- data.map(|data| format!("{data}"))
- .unwrap_or_else(|| "N/A".to_owned())
- }
- let mut point_details = detail
- .map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
- .unwrap_or_default();

+ if let Some(base_account) = base_bank.get_account(&pubkey) {
+ let delta = warped_account.lamports() - base_account.lamports();
+ let detail_ref = stake_calculation_details.get(&pubkey);
+ let detail: Option<&CalculationDetail> =
+ detail_ref.as_ref().map(|detail_ref| detail_ref.value());
+ println!(
+ "{:<45}({}): {} => {} (+{} {:>4.9}%) {:?}",
+ format!("{pubkey}"), // format! is needed to pad/justify correctly.
+ base_account.owner(),
+ Sol(base_account.lamports()),
+ Sol(warped_account.lamports()),
+ Sol(delta),
+ ((warped_account.lamports() as f64)
+ / (base_account.lamports() as f64)
+ * 100_f64)
+ - 100_f64,
+ detail,
+ );
+ if let Some(ref mut csv_writer) = csv_writer {
+ #[derive(Serialize)]
+ struct InflationRecord {
+ cluster_type: String,
+ rewarded_epoch: Epoch,
+ account: String,
+ owner: String,
+ old_balance: u64,
+ new_balance: u64,
+ data_size: usize,
+ delegation: String,
+ delegation_owner: String,
+ effective_stake: String,
+ delegated_stake: String,
+ rent_exempt_reserve: String,
+ activation_epoch: String,
+ deactivation_epoch: String,
+ earned_epochs: String,
+ epoch: String,
+ epoch_credits: String,
+ epoch_points: String,
+ epoch_stake: String,
+ old_credits_observed: String,
+ new_credits_observed: String,
+ base_rewards: String,
+ stake_rewards: String,
+ vote_rewards: String,
+ commission: String,
+ cluster_rewards: String,
+ cluster_points: String,
+ old_capitalization: u64,
+ new_capitalization: u64,
+ }
+ fn format_or_na<T: fmt::Display>(
+ data: Option<T>,
+ ) -> String {
+ data.map(|data| format!("{data}"))
+ .unwrap_or_else(|| "N/A".to_owned())
+ }
+ let mut point_details = detail
+ .map(|d| d.points.iter().map(Some).collect::<Vec<_>>())
+ .unwrap_or_default();

- // ensure we print even if there is no calculation/point detail
- if point_details.is_empty() {
- point_details.push(None);
- }
+ // ensure we print even if there is no calculation/point detail
+ if point_details.is_empty() {
+ point_details.push(None);
+ }

- for point_detail in point_details {
- let (cluster_rewards, cluster_points) = last_point_value
- .read()
- .unwrap()
- .clone()
- .map_or((None, None), |pv| {
- (Some(pv.rewards), Some(pv.points))
- });
- let record = InflationRecord {
- cluster_type: format!("{:?}", base_bank.cluster_type()),
- rewarded_epoch: 
base_bank.epoch(),
- account: format!("{pubkey}"),
- owner: format!("{}", base_account.owner()),
- old_balance: base_account.lamports(),
- new_balance: warped_account.lamports(),
- data_size: base_account.data().len(),
- delegation: format_or_na(detail.map(|d| d.voter)),
- delegation_owner: format_or_na(
- detail.map(|d| d.voter_owner),
- ),
- effective_stake: format_or_na(
- detail.map(|d| d.current_effective_stake),
- ),
- delegated_stake: format_or_na(
- detail.map(|d| d.total_stake),
- ),
- rent_exempt_reserve: format_or_na(
- detail.map(|d| d.rent_exempt_reserve),
- ),
- activation_epoch: format_or_na(detail.map(|d| {
- if d.activation_epoch < Epoch::max_value() {
- d.activation_epoch
- } else {
- // bootstrapped
- 0
- }
- })),
- deactivation_epoch: format_or_na(
- detail.and_then(|d| d.deactivation_epoch),
- ),
- earned_epochs: format_or_na(detail.map(|d| d.epochs)),
- epoch: format_or_na(point_detail.map(|d| d.epoch)),
- epoch_credits: format_or_na(
- point_detail.map(|d| d.credits),
- ),
- epoch_points: format_or_na(point_detail.map(|d| d.points)),
- epoch_stake: format_or_na(point_detail.map(|d| d.stake)),
- old_credits_observed: format_or_na(
- detail.and_then(|d| d.old_credits_observed),
- ),
- new_credits_observed: format_or_na(
- detail.and_then(|d| d.new_credits_observed),
- ),
- base_rewards: format_or_na(detail.map(|d| d.base_rewards)),
- stake_rewards: format_or_na(
- detail.map(|d| d.stake_rewards),
- ),
- vote_rewards: format_or_na(detail.map(|d| d.vote_rewards)),
- commission: format_or_na(detail.map(|d| d.commission)),
- cluster_rewards: format_or_na(cluster_rewards),
- cluster_points: format_or_na(cluster_points),
- old_capitalization: base_bank.capitalization(),
- new_capitalization: warped_bank.capitalization(),
- };
- csv_writer.serialize(&record).unwrap();
+ for point_detail in point_details {
+ let (cluster_rewards, cluster_points) = last_point_value
+ .read()
+ .unwrap()
+ .clone()
+ .map_or((None, None), |pv| {
+ (Some(pv.rewards), Some(pv.points))
+ });
+ let record = InflationRecord {
+ cluster_type: format!("{:?}", base_bank.cluster_type()),
+ rewarded_epoch: base_bank.epoch(),
+ account: format!("{pubkey}"),
+ owner: format!("{}", base_account.owner()),
+ old_balance: base_account.lamports(),
+ new_balance: warped_account.lamports(),
+ data_size: base_account.data().len(),
+ delegation: format_or_na(detail.map(|d| d.voter)),
+ delegation_owner: format_or_na(
+ detail.map(|d| d.voter_owner),
+ ),
+ effective_stake: format_or_na(
+ detail.map(|d| d.current_effective_stake),
+ ),
+ delegated_stake: format_or_na(
+ detail.map(|d| d.total_stake),
+ ),
+ rent_exempt_reserve: format_or_na(
+ detail.map(|d| d.rent_exempt_reserve),
+ ),
+ activation_epoch: format_or_na(detail.map(|d| {
+ if d.activation_epoch < Epoch::max_value() {
+ d.activation_epoch
+ } else {
+ // bootstrapped
+ 0
+ }
+ })),
+ deactivation_epoch: format_or_na(
+ detail.and_then(|d| d.deactivation_epoch),
+ ),
+ earned_epochs: format_or_na(detail.map(|d| d.epochs)),
+ epoch: format_or_na(point_detail.map(|d| d.epoch)),
+ epoch_credits: format_or_na(
+ point_detail.map(|d| d.credits),
+ ),
+ epoch_points: format_or_na(
+ point_detail.map(|d| d.points),
+ ),
+ epoch_stake: format_or_na(
+ point_detail.map(|d| d.stake),
+ ),
+ old_credits_observed: format_or_na(
+ detail.and_then(|d| d.old_credits_observed),
+ ),
+ new_credits_observed: format_or_na(
+ detail.and_then(|d| d.new_credits_observed),
+ ),
+ base_rewards: format_or_na(
+ detail.map(|d| d.base_rewards),
+ ),
+ stake_rewards: format_or_na(
detail.map(|d| d.stake_rewards), + ), + vote_rewards: format_or_na( + detail.map(|d| d.vote_rewards), + ), + commission: format_or_na(detail.map(|d| d.commission)), + cluster_rewards: format_or_na(cluster_rewards), + cluster_points: format_or_na(cluster_points), + old_capitalization: base_bank.capitalization(), + new_capitalization: warped_bank.capitalization(), + }; + csv_writer.serialize(&record).unwrap(); + } } + overall_delta += delta; + } else { + error!("new account!?: {}", pubkey); } - overall_delta += delta; - } else { - error!("new account!?: {}", pubkey); } - } - if overall_delta > 0 { - println!("Sum of lamports changes: {}", Sol(overall_delta)); - } - } else { - if arg_matches.is_present("recalculate_capitalization") { - eprintln!("Capitalization isn't verified because it's recalculated"); - } - if arg_matches.is_present("inflation") { - eprintln!("Forcing inflation isn't meaningful because bank isn't warping"); - } + if overall_delta > 0 { + println!("Sum of lamports changes: {}", Sol(overall_delta)); + } + } else { + if arg_matches.is_present("recalculate_capitalization") { + eprintln!("Capitalization isn't verified because it's recalculated"); + } + if arg_matches.is_present("inflation") { + eprintln!( + "Forcing inflation isn't meaningful because bank isn't warping" + ); + } - assert_capitalization(&bank); - println!("Inflation: {:?}", bank.inflation()); - println!("RentCollector: {:?}", bank.rent_collector()); - println!("Capitalization: {}", Sol(bank.capitalization())); - } - } - ("purge", Some(arg_matches)) => { - let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot); - let end_slot = value_t!(arg_matches, "end_slot", Slot).ok(); - let perform_compaction = arg_matches.is_present("enable_compaction"); - if arg_matches.is_present("no_compaction") { - warn!("--no-compaction is deprecated and is now the default behavior."); + assert_capitalization(&bank); + println!("Inflation: {:?}", bank.inflation()); + println!("RentCollector: {:?}", bank.rent_collector()); + println!("Capitalization: {}", Sol(bank.capitalization())); + } } - let dead_slots_only = arg_matches.is_present("dead_slots_only"); - let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize); + ("purge", Some(arg_matches)) => { + let start_slot = value_t_or_exit!(arg_matches, "start_slot", Slot); + let end_slot = value_t!(arg_matches, "end_slot", Slot).ok(); + let perform_compaction = arg_matches.is_present("enable_compaction"); + if arg_matches.is_present("no_compaction") { + warn!("--no-compaction is deprecated and is now the default behavior."); + } + let dead_slots_only = arg_matches.is_present("dead_slots_only"); + let batch_size = value_t_or_exit!(arg_matches, "batch_size", usize); - let blockstore = - open_blockstore(&ledger_path, arg_matches, AccessType::PrimaryForMaintenance); + let blockstore = open_blockstore( + &ledger_path, + arg_matches, + AccessType::PrimaryForMaintenance, + ); - let end_slot = match end_slot { - Some(end_slot) => end_slot, - None => match blockstore.slot_meta_iterator(start_slot) { - Ok(metas) => { - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); - if slots.is_empty() { - eprintln!("Purge range is empty"); + let end_slot = match end_slot { + Some(end_slot) => end_slot, + None => match blockstore.slot_meta_iterator(start_slot) { + Ok(metas) => { + let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + if slots.is_empty() { + eprintln!("Purge range is empty"); + exit(1); + } + *slots.last().unwrap() + } + Err(err) => { + eprintln!("Unable to 
read the Ledger: {err:?}");
 exit(1);
 }
- *slots.last().unwrap()
- }
- Err(err) => {
- eprintln!("Unable to read the Ledger: {err:?}");
- exit(1);
- }
- },
- };
+ },
+ };

- if end_slot < start_slot {
- eprintln!("end slot {end_slot} is less than start slot {start_slot}");
- exit(1);
- }
- info!(
+ if end_slot < start_slot {
+ eprintln!("end slot {end_slot} is less than start slot {start_slot}");
+ exit(1);
+ }
+ info!(
 "Purging data from slots {} to {} ({} slots) (do compaction: {}) (dead slot \
 only: {})",
 start_slot,
@@ -3606,239 +3679,254 @@ fn main() {
 perform_compaction,
 dead_slots_only,
 );
- let purge_from_blockstore = |start_slot, end_slot| {
- blockstore.purge_from_next_slots(start_slot, end_slot);
- if perform_compaction {
- blockstore.purge_and_compact_slots(start_slot, end_slot);
- } else {
- blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
- }
- };
- if !dead_slots_only {
- let slots_iter = &(start_slot..=end_slot).chunks(batch_size);
- for slots in slots_iter {
- let slots = slots.collect::<Vec<_>>();
- assert!(!slots.is_empty());
-
- let start_slot = *slots.first().unwrap();
- let end_slot = *slots.last().unwrap();
- info!(
- "Purging chunked slots from {} to {} ({} slots)",
- start_slot,
- end_slot,
- end_slot - start_slot
- );
- purge_from_blockstore(start_slot, end_slot);
- }
- } else {
- let dead_slots_iter = blockstore
- .dead_slots_iterator(start_slot)
- .unwrap()
- .take_while(|s| *s <= end_slot);
- for dead_slot in dead_slots_iter {
- info!("Purging dead slot {}", dead_slot);
- purge_from_blockstore(dead_slot, dead_slot);
- }
- }
- }
- ("list-roots", Some(arg_matches)) => {
- let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
-
- let max_height = value_t!(arg_matches, "max_height", usize).unwrap_or(usize::MAX);
- let start_root = value_t!(arg_matches, "start_root", Slot).unwrap_or(0);
- let num_roots = value_t_or_exit!(arg_matches, "num_roots", usize);
-
- let iter = blockstore
- .rooted_slot_iterator(start_root)
- .expect("Failed to get rooted slot");
-
- let mut output: Box<dyn Write> =
- if let Some(path) = arg_matches.value_of("slot_list") {
- match File::create(path) {
- Ok(file) => Box::new(file),
- _ => Box::new(stdout()),
+ let purge_from_blockstore = |start_slot, end_slot| {
+ blockstore.purge_from_next_slots(start_slot, end_slot);
+ if perform_compaction {
+ blockstore.purge_and_compact_slots(start_slot, end_slot);
+ } else {
+ blockstore.purge_slots(start_slot, end_slot, PurgeType::Exact);
 }
- } else {
- Box::new(stdout())
 };
-
- iter.take(num_roots)
- .take_while(|slot| *slot <= max_height as u64)
- .collect::<Vec<_>>()
- .into_iter()
- .rev()
- .for_each(|slot| {
- let blockhash = blockstore
- .get_slot_entries(slot, 0)
- .unwrap()
- .last()
+ if !dead_slots_only {
+ let slots_iter = &(start_slot..=end_slot).chunks(batch_size);
+ for slots in slots_iter {
+ let slots = slots.collect::<Vec<_>>();
+ assert!(!slots.is_empty());
+
+ let start_slot = *slots.first().unwrap();
+ let end_slot = *slots.last().unwrap();
+ info!(
+ "Purging chunked slots from {} to {} ({} slots)",
+ start_slot,
+ end_slot,
+ end_slot - start_slot
+ );
+ purge_from_blockstore(start_slot, end_slot);
+ }
+ } else {
+ let dead_slots_iter = blockstore
+ .dead_slots_iterator(start_slot)
 .unwrap()
- .hash;
-
- writeln!(output, "{slot}: {blockhash:?}").expect("failed to write");
- });
- }
- ("latest-optimistic-slots", Some(arg_matches)) => {
- let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
- let num_slots = value_t_or_exit!(arg_matches, 
"num_slots", usize); - let exclude_vote_only_slots = arg_matches.is_present("exclude_vote_only_slots"); - let slots = - get_latest_optimistic_slots(&blockstore, num_slots, exclude_vote_only_slots); - - println!( - "{:>20} {:>44} {:>32} {:>13}", - "Slot", "Hash", "Timestamp", "Vote Only?" - ); - for (slot, hash_and_timestamp_opt, contains_nonvote) in slots.iter() { - let (time_str, hash_str) = - if let Some((hash, timestamp)) = hash_and_timestamp_opt { - let secs: u64 = (timestamp / 1_000) as u64; - let nanos: u32 = ((timestamp % 1_000) * 1_000_000) as u32; - let t = UNIX_EPOCH + Duration::new(secs, nanos); - let datetime: DateTime = t.into(); - - (datetime.to_rfc3339(), format!("{hash}")) + .take_while(|s| *s <= end_slot); + for dead_slot in dead_slots_iter { + info!("Purging dead slot {}", dead_slot); + purge_from_blockstore(dead_slot, dead_slot); + } + } + } + ("list-roots", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + + let max_height = + value_t!(arg_matches, "max_height", usize).unwrap_or(usize::MAX); + let start_root = value_t!(arg_matches, "start_root", Slot).unwrap_or(0); + let num_roots = value_t_or_exit!(arg_matches, "num_roots", usize); + + let iter = blockstore + .rooted_slot_iterator(start_root) + .expect("Failed to get rooted slot"); + + let mut output: Box = + if let Some(path) = arg_matches.value_of("slot_list") { + match File::create(path) { + Ok(file) => Box::new(file), + _ => Box::new(stdout()), + } } else { - let unknown = "Unknown"; - (String::from(unknown), String::from(unknown)) + Box::new(stdout()) }; + + iter.take(num_roots) + .take_while(|slot| *slot <= max_height as u64) + .collect::>() + .into_iter() + .rev() + .for_each(|slot| { + let blockhash = blockstore + .get_slot_entries(slot, 0) + .unwrap() + .last() + .unwrap() + .hash; + + writeln!(output, "{slot}: {blockhash:?}").expect("failed to write"); + }); + } + ("latest-optimistic-slots", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + let num_slots = value_t_or_exit!(arg_matches, "num_slots", usize); + let exclude_vote_only_slots = arg_matches.is_present("exclude_vote_only_slots"); + let slots = get_latest_optimistic_slots( + &blockstore, + num_slots, + exclude_vote_only_slots, + ); + println!( "{:>20} {:>44} {:>32} {:>13}", - slot, &hash_str, &time_str, !contains_nonvote + "Slot", "Hash", "Timestamp", "Vote Only?" 
);
+ for (slot, hash_and_timestamp_opt, contains_nonvote) in slots.iter() {
+ let (time_str, hash_str) =
+ if let Some((hash, timestamp)) = hash_and_timestamp_opt {
+ let secs: u64 = (timestamp / 1_000) as u64;
+ let nanos: u32 = ((timestamp % 1_000) * 1_000_000) as u32;
+ let t = UNIX_EPOCH + Duration::new(secs, nanos);
+ let datetime: DateTime<Utc> = t.into();
+
+ (datetime.to_rfc3339(), format!("{hash}"))
+ } else {
+ let unknown = "Unknown";
+ (String::from(unknown), String::from(unknown))
+ };
+ println!(
+ "{:>20} {:>44} {:>32} {:>13}",
+ slot, &hash_str, &time_str, !contains_nonvote
+ );
+ }
 }
- ("repair-roots", Some(arg_matches)) => {
- let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Primary);
-
- let start_root = value_t!(arg_matches, "start_root", Slot)
- .unwrap_or_else(|_| blockstore.max_root());
- let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
- let end_root = value_t!(arg_matches, "end_root", Slot)
- .unwrap_or_else(|_| start_root.saturating_sub(max_slots));
- assert!(start_root > end_root);
- let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
- if arg_matches.is_present("end_root") && num_slots > max_slots {
- eprintln!(
+ ("repair-roots", Some(arg_matches)) => {
+ let blockstore =
+ open_blockstore(&ledger_path, arg_matches, AccessType::Primary);
+
+ let start_root = value_t!(arg_matches, "start_root", Slot)
+ .unwrap_or_else(|_| blockstore.max_root());
+ let max_slots = value_t_or_exit!(arg_matches, "max_slots", u64);
+ let end_root = value_t!(arg_matches, "end_root", Slot)
+ .unwrap_or_else(|_| start_root.saturating_sub(max_slots));
+ assert!(start_root > end_root);
+ let num_slots = start_root - end_root - 1; // Adjust by one since start_root need not be checked
+ if arg_matches.is_present("end_root") && num_slots > max_slots {
+ eprintln!(
 "Requested range {num_slots} too large, max {max_slots}. 
Either adjust \ `--until` value, or pass a larger `--repair-limit` to override the limit", ); - exit(1); - } - - let num_repaired_roots = blockstore - .scan_and_fix_roots(Some(start_root), Some(end_root), &AtomicBool::new(false)) - .unwrap_or_else(|err| { - eprintln!("Unable to repair roots: {err}"); exit(1); - }); - println!("Successfully repaired {num_repaired_roots} roots"); - } - ("bounds", Some(arg_matches)) => { - let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); + } - match blockstore.slot_meta_iterator(0) { - Ok(metas) => { - let output_format = - OutputFormat::from_matches(arg_matches, "output_format", false); - let all = arg_matches.is_present("all"); + let num_repaired_roots = blockstore + .scan_and_fix_roots( + Some(start_root), + Some(end_root), + &AtomicBool::new(false), + ) + .unwrap_or_else(|err| { + eprintln!("Unable to repair roots: {err}"); + exit(1); + }); + println!("Successfully repaired {num_repaired_roots} roots"); + } + ("bounds", Some(arg_matches)) => { + let blockstore = + open_blockstore(&ledger_path, arg_matches, AccessType::Secondary); - let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + match blockstore.slot_meta_iterator(0) { + Ok(metas) => { + let output_format = + OutputFormat::from_matches(arg_matches, "output_format", false); + let all = arg_matches.is_present("all"); - let slot_bounds = if slots.is_empty() { - SlotBounds::default() - } else { - // Collect info about slot bounds - let mut bounds = SlotBounds { - slots: SlotInfo { - total: slots.len(), - first: Some(*slots.first().unwrap()), - last: Some(*slots.last().unwrap()), - ..SlotInfo::default() - }, - ..SlotBounds::default() - }; - if all { - bounds.all_slots = Some(&slots); - } + let slots: Vec<_> = metas.map(|(slot, _)| slot).collect(); + + let slot_bounds = if slots.is_empty() { + SlotBounds::default() + } else { + // Collect info about slot bounds + let mut bounds = SlotBounds { + slots: SlotInfo { + total: slots.len(), + first: Some(*slots.first().unwrap()), + last: Some(*slots.last().unwrap()), + ..SlotInfo::default() + }, + ..SlotBounds::default() + }; + if all { + bounds.all_slots = Some(&slots); + } - // Consider also rooted slots, if present - if let Ok(rooted) = blockstore.rooted_slot_iterator(0) { - let mut first_rooted = None; - let mut last_rooted = None; - let mut total_rooted = 0; - for (i, slot) in rooted.into_iter().enumerate() { - if i == 0 { - first_rooted = Some(slot); + // Consider also rooted slots, if present + if let Ok(rooted) = blockstore.rooted_slot_iterator(0) { + let mut first_rooted = None; + let mut last_rooted = None; + let mut total_rooted = 0; + for (i, slot) in rooted.into_iter().enumerate() { + if i == 0 { + first_rooted = Some(slot); + } + last_rooted = Some(slot); + total_rooted += 1; } - last_rooted = Some(slot); - total_rooted += 1; + let last_root_for_comparison = last_rooted.unwrap_or_default(); + let count_past_root = slots + .iter() + .rev() + .take_while(|slot| *slot > &last_root_for_comparison) + .count(); + + bounds.roots = SlotInfo { + total: total_rooted, + first: first_rooted, + last: last_rooted, + num_after_last_root: Some(count_past_root), + }; } - let last_root_for_comparison = last_rooted.unwrap_or_default(); - let count_past_root = slots - .iter() - .rev() - .take_while(|slot| *slot > &last_root_for_comparison) - .count(); - - bounds.roots = SlotInfo { - total: total_rooted, - first: first_rooted, - last: last_rooted, - num_after_last_root: Some(count_past_root), - }; - } - bounds - }; + bounds + 
};

- // Print collected data
- println!("{}", output_format.formatted_string(&slot_bounds));
- }
- Err(err) => {
- eprintln!("Unable to read the Ledger: {err:?}");
- exit(1);
+ // Print collected data
+ println!("{}", output_format.formatted_string(&slot_bounds));
+ }
+ Err(err) => {
+ eprintln!("Unable to read the Ledger: {err:?}");
+ exit(1);
+ }
+ };
+ }
+ ("analyze-storage", Some(arg_matches)) => {
+ analyze_storage(
+ &open_blockstore(&ledger_path, arg_matches, AccessType::Secondary).db(),
+ );
+ }
+ ("compute-slot-cost", Some(arg_matches)) => {
+ let blockstore =
+ open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
+
+ let mut slots: Vec<Slot> = vec![];
+ if !arg_matches.is_present("slots") {
+ if let Ok(metas) = blockstore.slot_meta_iterator(0) {
+ slots = metas.map(|(slot, _)| slot).collect();
+ }
+ } else {
+ slots = values_t_or_exit!(arg_matches, "slots", Slot);
 }
- };
- }
- ("analyze-storage", Some(arg_matches)) => {
- analyze_storage(
- &open_blockstore(&ledger_path, arg_matches, AccessType::Secondary).db(),
- );
- }
- ("compute-slot-cost", Some(arg_matches)) => {
- let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);

- let mut slots: Vec<Slot> = vec![];
- if !arg_matches.is_present("slots") {
- if let Ok(metas) = blockstore.slot_meta_iterator(0) {
- slots = metas.map(|(slot, _)| slot).collect();
+ for slot in slots {
+ if let Err(err) = compute_slot_cost(&blockstore, slot) {
+ eprintln!("{err}");
+ }
 }
- } else {
- slots = values_t_or_exit!(arg_matches, "slots", Slot);
 }
-
- for slot in slots {
- if let Err(err) = compute_slot_cost(&blockstore, slot) {
+ ("print-file-metadata", Some(arg_matches)) => {
+ let blockstore =
+ open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
+ let sst_file_name = arg_matches.value_of("file_name");
+ if let Err(err) = print_blockstore_file_metadata(&blockstore, &sst_file_name) {
 eprintln!("{err}");
 }
 }
- }
- ("print-file-metadata", Some(arg_matches)) => {
- let blockstore = open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
- let sst_file_name = arg_matches.value_of("file_name");
- if let Err(err) = print_blockstore_file_metadata(&blockstore, &sst_file_name) {
- eprintln!("{err}");
+ ("", _) => {
+ eprintln!("{}", matches.usage());
+ exit(1);
 }
- }
- ("", _) => {
- eprintln!("{}", matches.usage());
- exit(1);
- }
- _ => unreachable!(),
- };
- measure_total_execution_time.stop();
- info!("{}", measure_total_execution_time);
- }
+ _ => unreachable!(),
+ };
+ }
+ };
+ measure_total_execution_time.stop();
+ info!("{}", measure_total_execution_time);
 }

 #[cfg(test)]
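For readers unfamiliar with the clap v2 dispatch shape used throughout this file, here is a minimal, self-contained sketch; the subcommand names are hypothetical and this is not the real ledger-tool wiring (assumes clap 2.x):

    use clap::{App, SubCommand};

    fn main() {
        let matches = App::new("demo")
            .subcommand(SubCommand::with_name("print"))
            .subcommand(SubCommand::with_name("purge"))
            .get_matches();

        // clap v2 yields ("", None) when no subcommand was given.
        match matches.subcommand() {
            ("print", Some(_arg_matches)) => println!("print selected"),
            ("purge", Some(_arg_matches)) => println!("purge selected"),
            ("", _) => {
                eprintln!("{}", matches.usage());
                std::process::exit(1);
            }
            _ => unreachable!(), // clap rejects unknown subcommand names earlier
        }
    }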