From 01b96b430a7c104eb4220807bd8419c4f9a0147f Mon Sep 17 00:00:00 2001
From: steviez
Date: Wed, 6 Nov 2024 11:38:17 -0600
Subject: [PATCH] Remove Blockstore fifo compaction code (#3469)

The use of fifo compaction in Blockstore has been deprecated at the CLI
level of agave-validator; that is, --rocksdb-shred-compaction no longer
accepts the value `fifo`. This change fully removes the fifo compaction
code from Blockstore and related structs.
---
 core/src/validator.rs            |  11 +--
 ledger-tool/src/blockstore.rs    |  20 +---
 ledger-tool/src/ledger_utils.rs  |  27 +----
 ledger-tool/tests/basic.rs       |  34 +------
 ledger/src/blockstore.rs         |  80 +--------------
 ledger/src/blockstore_db.rs      | 103 +-------------------
 ledger/src/blockstore_metrics.rs |   3 -
 ledger/src/blockstore_options.rs | 165 +------------------------------
 validator/src/main.rs            |  43 +-------
 9 files changed, 26 insertions(+), 460 deletions(-)

diff --git a/core/src/validator.rs b/core/src/validator.rs
index d6a1c672a299c4..612bf68b0bc0c8 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -65,7 +65,7 @@ use {
             MAX_REPLAY_WAKE_UP_SIGNALS,
         },
         blockstore_metric_report_service::BlockstoreMetricReportService,
-        blockstore_options::BlockstoreOptions,
+        blockstore_options::{BlockstoreOptions, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL},
         blockstore_processor::{self, TransactionStatusSender},
         entry_notifier_interface::EntryNotifierArc,
         entry_notifier_service::{EntryNotifierSender, EntryNotifierService},
@@ -2339,14 +2339,7 @@ fn cleanup_blockstore_incorrect_shred_versions(
     // not critical, so swallow errors from backup blockstore operations.
     let backup_folder = format!(
         "{}_backup_{}_{}_{}",
-        config
-            .blockstore_options
-            .column_options
-            .shred_storage_type
-            .blockstore_directory(),
-        incorrect_shred_version,
-        start_slot,
-        end_slot
+        BLOCKSTORE_DIRECTORY_ROCKS_LEVEL, incorrect_shred_version, start_slot, end_slot
     );
     match Blockstore::open_with_options(
         &blockstore.ledger_path().join(backup_folder),
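For illustration, a minimal sketch (not part of this patch) of the backup
folder name that the simplified format!() call above produces; the shred
version and slot bounds are hypothetical values:

    fn main() {
        // With fifo gone, the prefix is always BLOCKSTORE_DIRECTORY_ROCKS_LEVEL,
        // i.e. "rocksdb". Shred version 4711 and slots 10..=20 are made up.
        let backup_folder = format!("{}_backup_{}_{}_{}", "rocksdb", 4711, 10, 20);
        assert_eq!(backup_folder, "rocksdb_backup_4711_10_20");
    }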
diff --git a/ledger-tool/src/blockstore.rs b/ledger-tool/src/blockstore.rs
index 4ee47742de0994..d163ade5bdcf19 100644
--- a/ledger-tool/src/blockstore.rs
+++ b/ledger-tool/src/blockstore.rs
@@ -4,7 +4,7 @@ use {
     crate::{
         error::{LedgerToolError, Result},
         ledger_path::canonicalize_ledger_path,
-        ledger_utils::{get_program_ids, get_shred_storage_type},
+        ledger_utils::get_program_ids,
         output::{output_ledger, output_slot, CliDuplicateSlotProof, SlotBounds, SlotInfo},
     },
     chrono::{DateTime, Utc},
@@ -21,7 +21,7 @@ use {
         ancestor_iterator::AncestorIterator,
         blockstore::{Blockstore, PurgeType},
         blockstore_db::{self, Column, ColumnName, Database},
-        blockstore_options::{AccessType, BLOCKSTORE_DIRECTORY_ROCKS_FIFO},
+        blockstore_options::AccessType,
         shred::Shred,
     },
     solana_sdk::{
@@ -669,22 +669,8 @@ fn do_blockstore_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) -
     let target_db = PathBuf::from(value_t_or_exit!(arg_matches, "target_db", String));

     let source = crate::open_blockstore(&ledger_path, arg_matches, AccessType::Secondary);
-
-    // Check if shred storage type can be inferred; if not, a new
-    // ledger is being created. open_blockstore() will attempt to
-    // to infer shred storage type as well, but this check provides
-    // extra insight to user on how to create a FIFO ledger.
-    let _ = get_shred_storage_type(
-        &target_db,
-        &format!(
-            "No --target-db ledger at {:?} was detected, default compaction \
-             (RocksLevel) will be used. Fifo compaction can be enabled for a new \
-             ledger by manually creating {BLOCKSTORE_DIRECTORY_ROCKS_FIFO} directory \
-             within the specified --target_db directory.",
-            &target_db
-        ),
-    );
     let target = crate::open_blockstore(&target_db, arg_matches, AccessType::Primary);
+
     for (slot, _meta) in source.slot_meta_iterator(starting_slot)? {
         if slot > ending_slot {
             break;
diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs
index 94778ee4b22407..df541aeea41ea0 100644
--- a/ledger-tool/src/ledger_utils.rs
+++ b/ledger-tool/src/ledger_utils.rs
@@ -19,7 +19,6 @@ use {
         blockstore::{Blockstore, BlockstoreError},
         blockstore_options::{
             AccessType, BlockstoreOptions, BlockstoreRecoveryMode, LedgerColumnOptions,
-            ShredStorageType,
         },
         blockstore_processor::{
             self, BlockstoreProcessorError, ProcessOptions, TransactionStatusSender,
@@ -442,13 +441,6 @@ pub fn open_blockstore(
         .map(BlockstoreRecoveryMode::from);
     let force_update_to_open = matches.is_present("force_update_to_open");
     let enforce_ulimit_nofile = !matches.is_present("ignore_ulimit_nofile_error");
-    let shred_storage_type = get_shred_storage_type(
-        ledger_path,
-        &format!(
-            "Shred storage type cannot be inferred for ledger at {ledger_path:?}, using default \
-             RocksLevel",
-        ),
-    );

     match Blockstore::open_with_options(
         ledger_path,
@@ -456,10 +448,7 @@ pub fn open_blockstore(
             access_type: access_type.clone(),
             recovery_mode: wal_recovery_mode.clone(),
             enforce_ulimit_nofile,
-            column_options: LedgerColumnOptions {
-                shred_storage_type,
-                ..LedgerColumnOptions::default()
-            },
+            column_options: LedgerColumnOptions::default(),
         },
     ) {
         Ok(blockstore) => blockstore,
@@ -515,20 +504,6 @@ pub fn open_blockstore(
     }
 }

-pub fn get_shred_storage_type(ledger_path: &Path, message: &str) -> ShredStorageType {
-    // TODO: the following shred_storage_type inference must be updated once
-    // the rocksdb options can be constructed via load_options_file() as the
-    // value picked by passing None for `max_shred_storage_size` could affect
-    // the persisted rocksdb options file.
-    match ShredStorageType::from_ledger_path(ledger_path, None) {
-        Some(s) => s,
-        None => {
-            info!("{}", message);
-            ShredStorageType::RocksLevel
-        }
-    }
-}
-
 /// Open blockstore with temporary primary access to allow necessary,
 /// persistent changes to be made to the blockstore (such as creation of new
 /// column family(s)). Then, continue opening with `original_access_type`
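A minimal sketch (not part of this patch) of the simplified call shape after
this change. The helper name open_default and the option values chosen here
are hypothetical; the BlockstoreOptions field layout mirrors the hunk above:

    use solana_ledger::{
        blockstore::Blockstore,
        blockstore_options::{AccessType, BlockstoreOptions, LedgerColumnOptions},
    };
    use std::path::Path;

    // With shred_storage_type gone, nothing is inferred from the ledger
    // directory; the default LedgerColumnOptions is always sufficient.
    fn open_default(ledger_path: &Path) -> Result<Blockstore, Box<dyn std::error::Error>> {
        Ok(Blockstore::open_with_options(
            ledger_path,
            BlockstoreOptions {
                access_type: AccessType::Secondary,
                recovery_mode: None,
                enforce_ulimit_nofile: true,
                column_options: LedgerColumnOptions::default(),
            },
        )?)
    }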
diff --git a/ledger-tool/tests/basic.rs b/ledger-tool/tests/basic.rs
index 2459f1287497c8..294360cf784b9c 100644
--- a/ledger-tool/tests/basic.rs
+++ b/ledger-tool/tests/basic.rs
@@ -1,12 +1,10 @@
 use {
     assert_cmd::prelude::*,
     solana_ledger::{
-        blockstore, blockstore::Blockstore, blockstore_options::ShredStorageType,
-        create_new_tmp_ledger_auto_delete, create_new_tmp_ledger_fifo_auto_delete,
+        blockstore, blockstore::Blockstore, create_new_tmp_ledger_auto_delete,
         genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete,
     },
     std::{
-        fs,
         path::Path,
         process::{Command, Output},
     },
@@ -46,13 +44,6 @@ fn nominal_default() {
     nominal_test_helper(ledger_path.path().to_str().unwrap());
 }

-#[test]
-fn nominal_fifo() {
-    let genesis_config = create_genesis_config(100).genesis_config;
-    let (ledger_path, _blockhash) = create_new_tmp_ledger_fifo_auto_delete!(&genesis_config);
-    nominal_test_helper(ledger_path.path().to_str().unwrap());
-}
-
 fn insert_test_shreds(ledger_path: &Path, ending_slot: u64) {
     let blockstore = Blockstore::open(ledger_path).unwrap();
     let (shreds, _) = blockstore::make_many_slot_entries(
@@ -63,25 +54,18 @@ fn insert_test_shreds(ledger_path: &Path, ending_slot: u64) {
     blockstore.insert_shreds(shreds, None, false).unwrap();
 }

-fn ledger_tool_copy_test(src_shred_compaction: &str, dst_shred_compaction: &str) {
+#[test]
+fn ledger_tool_copy_test() {
     let genesis_config = create_genesis_config(100).genesis_config;
-    let (ledger_path, _blockhash) = match src_shred_compaction {
-        "fifo" => create_new_tmp_ledger_fifo_auto_delete!(&genesis_config),
-        _ => create_new_tmp_ledger_auto_delete!(&genesis_config),
-    };
+    let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
+
     const LEDGER_TOOL_COPY_TEST_SHRED_COUNT: u64 = 25;
     const LEDGER_TOOL_COPY_TEST_ENDING_SLOT: u64 = LEDGER_TOOL_COPY_TEST_SHRED_COUNT + 1;
     insert_test_shreds(ledger_path.path(), LEDGER_TOOL_COPY_TEST_ENDING_SLOT);
     let ledger_path = ledger_path.path().to_str().unwrap();
     let target_ledger_path = get_tmp_ledger_path_auto_delete!();
-    if dst_shred_compaction == "fifo" {
-        let rocksdb_fifo_path = target_ledger_path
-            .path()
-            .join(ShredStorageType::rocks_fifo(None).blockstore_directory());
-        fs::create_dir_all(rocksdb_fifo_path).unwrap();
-    }
     let target_ledger_path = target_ledger_path.path().to_str().unwrap();
     let output = run_ledger_tool(&[
         "-l",
@@ -103,11 +87,3 @@ fn ledger_tool_copy_test(src_shred_compaction: &str, dst_shred_compaction: &str)
         assert!(!src_slot_output.stdout.is_empty());
     }
 }
-
-#[test]
-fn copy_test() {
-    ledger_tool_copy_test("level", "level");
-    ledger_tool_copy_test("level", "fifo");
-    ledger_tool_copy_test("fifo", "level");
-    ledger_tool_copy_test("fifo", "fifo");
-}
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index a3b6588ae680c2..3f43af75223883 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -12,8 +12,7 @@ use {
         blockstore_meta::*,
         blockstore_metrics::BlockstoreRpcApiMetrics,
         blockstore_options::{
-            AccessType, BlockstoreOptions, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_FIFO,
-            BLOCKSTORE_DIRECTORY_ROCKS_LEVEL,
+            AccessType, BlockstoreOptions, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL,
         },
         blockstore_processor::BlockstoreProcessorError,
         leader_schedule_cache::LeaderScheduleCache,
@@ -336,12 +335,7 @@ impl Blockstore {
     fn do_open(ledger_path: &Path, options: BlockstoreOptions) -> Result<Blockstore> {
         fs::create_dir_all(ledger_path)?;
-        let blockstore_path = ledger_path.join(
-            options
-                .column_options
-                .shred_storage_type
-                .blockstore_directory(),
-        );
+        let blockstore_path = ledger_path.join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL);

         adjust_ulimit_nofile(options.enforce_ulimit_nofile)?;
@@ -497,9 +491,7 @@ impl Blockstore {
     pub fn destroy(ledger_path: &Path) -> Result<()> {
         // Database::destroy() fails if the root directory doesn't exist
         fs::create_dir_all(ledger_path)?;
-        Database::destroy(&Path::new(ledger_path).join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL)).and(
-            Database::destroy(&Path::new(ledger_path).join(BLOCKSTORE_DIRECTORY_ROCKS_FIFO)),
-        )
+        Database::destroy(&Path::new(ledger_path).join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL))
     }

     /// Returns the SlotMeta of the specified slot.
@@ -4866,7 +4858,7 @@ pub fn create_new_ledger(
     genesis_config.write(ledger_path)?;

     // Fill slot 0 with ticks that link back to the genesis_config to bootstrap the ledger.
-    let blockstore_dir = column_options.shred_storage_type.blockstore_directory();
+    let blockstore_dir = BLOCKSTORE_DIRECTORY_ROCKS_LEVEL;
     let blockstore = Blockstore::open_with_options(
         ledger_path,
         BlockstoreOptions {
@@ -5047,23 +5039,6 @@ macro_rules! create_new_tmp_ledger_with_size {
     };
 }

-#[macro_export]
-macro_rules! create_new_tmp_ledger_fifo {
-    ($genesis_config:expr) => {
-        $crate::blockstore::create_new_ledger_from_name(
-            $crate::tmp_ledger_name!(),
-            $genesis_config,
-            $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
-            $crate::blockstore_options::LedgerColumnOptions {
-                shred_storage_type: $crate::blockstore_options::ShredStorageType::RocksFifo(
-                    $crate::blockstore_options::BlockstoreRocksFifoOptions::new_for_tests(),
-                ),
-                ..$crate::blockstore_options::LedgerColumnOptions::default()
-            },
-        )
-    };
-}
-
 #[macro_export]
 macro_rules! create_new_tmp_ledger_auto_delete {
     ($genesis_config:expr) => {
@@ -5076,23 +5051,6 @@ macro_rules! create_new_tmp_ledger_auto_delete {
     };
 }

-#[macro_export]
-macro_rules! create_new_tmp_ledger_fifo_auto_delete {
-    ($genesis_config:expr) => {
-        $crate::blockstore::create_new_ledger_from_name_auto_delete(
-            $crate::tmp_ledger_name!(),
-            $genesis_config,
-            $crate::macro_reexports::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
-            $crate::blockstore_options::LedgerColumnOptions {
-                shred_storage_type: $crate::blockstore_options::ShredStorageType::RocksFifo(
-                    $crate::blockstore_options::BlockstoreRocksFifoOptions::new_for_tests(),
-                ),
-                ..$crate::blockstore_options::LedgerColumnOptions::default()
-            },
-        )
-    };
-}
-
 pub(crate) fn verify_shred_slots(slot: Slot, parent: Slot, root: Slot) -> bool {
     if slot == 0 && parent == 0 && root == 0 {
         return true; // valid write to slot zero.
@@ -5390,7 +5348,6 @@ pub mod tests {
     use {
         super::*,
         crate::{
-            blockstore_options::{BlockstoreRocksFifoOptions, ShredStorageType},
             genesis_utils::{create_genesis_config, GenesisConfigInfo},
             leader_schedule::{FixedSchedule, LeaderSchedule},
             shred::{max_ticks_per_n_shreds, ShredFlags, LEGACY_SHRED_DATA_CAPACITY},
@@ -5486,35 +5443,6 @@ pub mod tests {
         );
     }

-    #[test]
-    fn test_create_new_ledger_with_options_fifo() {
-        solana_logger::setup();
-        let mint_total = 1_000_000_000_000;
-        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(mint_total);
-        let (ledger_path, _blockhash) = create_new_tmp_ledger_fifo_auto_delete!(&genesis_config);
-        let blockstore = Blockstore::open_with_options(
-            ledger_path.path(),
-            BlockstoreOptions {
-                column_options: LedgerColumnOptions {
-                    shred_storage_type: ShredStorageType::RocksFifo(
-                        BlockstoreRocksFifoOptions::new_for_tests(),
-                    ),
-                    ..LedgerColumnOptions::default()
-                },
-                ..BlockstoreOptions::default()
-            },
-        )
-        .unwrap();
-
-        let ticks = create_ticks(genesis_config.ticks_per_slot, 0, genesis_config.hash());
-        let entries = blockstore.get_slot_entries(0, 0).unwrap();
-
-        assert_eq!(ticks, entries);
-        assert!(Path::new(ledger_path.path())
-            .join(BLOCKSTORE_DIRECTORY_ROCKS_FIFO)
-            .exists());
-    }
-
     #[test]
     fn test_insert_get_bytes() {
         // Create enough entries to ensure there are at least two shreds created
diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs
index 8c96403f20e9da..bd96fe6d8b4bb8 100644
--- a/ledger/src/blockstore_db.rs
+++ b/ledger/src/blockstore_db.rs
@@ -9,9 +9,7 @@ use {
             PERF_METRIC_OP_NAME_MULTI_GET, PERF_METRIC_OP_NAME_PUT,
             PERF_METRIC_OP_NAME_WRITE_BATCH,
         },
-        blockstore_options::{
-            AccessType, BlockstoreOptions, LedgerColumnOptions, ShredStorageType,
-        },
+        blockstore_options::{AccessType, BlockstoreOptions, LedgerColumnOptions},
     },
     bincode::{deserialize, serialize},
     byteorder::{BigEndian, ByteOrder},
@@ -22,9 +20,8 @@ use {
         compaction_filter::CompactionFilter,
         compaction_filter_factory::{CompactionFilterContext, CompactionFilterFactory},
         properties as RocksProperties, ColumnFamily, ColumnFamilyDescriptor, CompactionDecision,
-        DBCompactionStyle, DBCompressionType, DBIterator, DBPinnableSlice, DBRawIterator,
-        FifoCompactOptions, IteratorMode as RocksIteratorMode, LiveFile, Options,
-        WriteBatch as RWriteBatch, DB,
+        DBCompressionType, DBIterator, DBPinnableSlice, DBRawIterator,
+        IteratorMode as RocksIteratorMode, LiveFile, Options, WriteBatch as RWriteBatch, DB,
     },
     serde::{de::DeserializeOwned, Serialize},
     solana_accounts_db::hardened_unpack::UnpackError,
@@ -51,7 +48,6 @@ use {
 const BLOCKSTORE_METRICS_ERROR: i64 = -1;

 const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024; // 256MB
-const FIFO_WRITE_BUFFER_SIZE: u64 = 2 * MAX_WRITE_BUFFER_SIZE;

 // SST files older than this value will be picked up for compaction. This value
 // was chosen to be one day to strike a balance between storage getting
@@ -480,8 +476,6 @@ impl Rocks {
     ) -> Vec<ColumnFamilyDescriptor> {
         use columns::*;

-        let (cf_descriptor_shred_data, cf_descriptor_shred_code) =
-            new_cf_descriptor_pair_shreds::<ShredData, ShredCode>(options, oldest_slot);
         let mut cf_descriptors = vec![
             new_cf_descriptor::<SlotMeta>(options, oldest_slot),
             new_cf_descriptor::<DeadSlots>(options, oldest_slot),
             new_cf_descriptor::<DuplicateSlots>(options, oldest_slot),
             new_cf_descriptor::<ErasureMeta>(options, oldest_slot),
             new_cf_descriptor::<Orphans>(options, oldest_slot),
             new_cf_descriptor::<BankHash>(options, oldest_slot),
             new_cf_descriptor::<Root>(options, oldest_slot),
             new_cf_descriptor::<Index>(options, oldest_slot),
-            cf_descriptor_shred_data,
-            cf_descriptor_shred_code,
+            new_cf_descriptor::<ShredData>(options, oldest_slot),
+            new_cf_descriptor::<ShredCode>(options, oldest_slot),
             new_cf_descriptor::<TransactionStatus>(options, oldest_slot),
             new_cf_descriptor::<AddressSignatures>(options, oldest_slot),
             new_cf_descriptor::<TransactionMemos>(options, oldest_slot),
@@ -2058,93 +2052,6 @@ fn process_cf_options_advanced<C: 'static + Column + ColumnName>(
     }
 }

-/// Creates and returns the column family descriptors for both data shreds and
-/// coding shreds column families.
-///
-/// @return a pair of ColumnFamilyDescriptor where the first / second elements
-/// are associated to the first / second template class respectively.
-fn new_cf_descriptor_pair_shreds<
-    D: 'static + Column + ColumnName, // Column Family for Data Shred
-    C: 'static + Column + ColumnName, // Column Family for Coding Shred
->(
-    options: &BlockstoreOptions,
-    oldest_slot: &OldestSlot,
-) -> (ColumnFamilyDescriptor, ColumnFamilyDescriptor) {
-    match &options.column_options.shred_storage_type {
-        ShredStorageType::RocksLevel => (
-            new_cf_descriptor::<D>(options, oldest_slot),
-            new_cf_descriptor::<C>(options, oldest_slot),
-        ),
-        ShredStorageType::RocksFifo(fifo_options) => (
-            new_cf_descriptor_fifo::<D>(&fifo_options.shred_data_cf_size, &options.column_options),
-            new_cf_descriptor_fifo::<C>(&fifo_options.shred_code_cf_size, &options.column_options),
-        ),
-    }
-}
-
-fn new_cf_descriptor_fifo<C: 'static + Column + ColumnName>(
-    max_cf_size: &u64,
-    column_options: &LedgerColumnOptions,
-) -> ColumnFamilyDescriptor {
-    if *max_cf_size > FIFO_WRITE_BUFFER_SIZE {
-        ColumnFamilyDescriptor::new(
-            C::NAME,
-            get_cf_options_fifo::<C>(max_cf_size, column_options),
-        )
-    } else {
-        panic!(
-            "{} cf_size must be greater than write buffer size {} when using \
-             ShredStorageType::RocksFifo.",
-            C::NAME,
-            FIFO_WRITE_BUFFER_SIZE
-        );
-    }
-}
-
-/// Returns the RocksDB Column Family Options which use FIFO Compaction.
-///
-/// Note that this CF options is optimized for workloads which write-keys
-/// are mostly monotonically increasing over time. For workloads where
-/// write-keys do not follow any order in general should use get_cf_options
-/// instead.
-///
-/// - [`max_cf_size`]: the maximum allowed column family size. Note that
-///   rocksdb will start deleting the oldest SST file when the column family
-///   size reaches `max_cf_size` - `FIFO_WRITE_BUFFER_SIZE` to strictly
-///   maintain the size limit.
-fn get_cf_options_fifo<C: 'static + Column + ColumnName>(
-    max_cf_size: &u64,
-    column_options: &LedgerColumnOptions,
-) -> Options {
-    let mut options = Options::default();
-
-    options.set_max_write_buffer_number(8);
-    options.set_write_buffer_size(FIFO_WRITE_BUFFER_SIZE as usize);
-    // FIFO always has its files in L0 so we only have one level.
-    options.set_num_levels(1);
-    // Since FIFO puts all its file in L0, it is suggested to have unlimited
-    // number of open files. The actual total number of open files will
-    // be close to max_cf_size / write_buffer_size.
-    options.set_max_open_files(-1);
-
-    let mut fifo_compact_options = FifoCompactOptions::default();
-
-    // Note that the following actually specifies size trigger for deleting
-    // the oldest SST file instead of specifying the size limit as its name
-    // might suggest. As a result, we should trigger the file deletion when
-    // the size reaches `max_cf_size - write_buffer_size` in order to correctly
-    // maintain the storage size limit.
-    fifo_compact_options
-        .set_max_table_files_size((*max_cf_size).saturating_sub(FIFO_WRITE_BUFFER_SIZE));
-
-    options.set_compaction_style(DBCompactionStyle::Fifo);
-    options.set_fifo_compaction_options(&fifo_compact_options);
-
-    process_cf_options_advanced::<C>(&mut options, column_options);
-
-    options
-}
-
 fn get_db_options(access_type: &AccessType) -> Options {
     let mut options = Options::default();
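A worked example (illustrative only) of the sizing rule in the removed
get_cf_options_fifo() above: deletion of the oldest SST file was triggered at
max_cf_size minus the fifo write buffer size, which is how the configured
budget was maintained strictly:

    // Constants as they appeared above: a 512 MiB fifo write buffer, i.e.
    // 2 * MAX_WRITE_BUFFER_SIZE with MAX_WRITE_BUFFER_SIZE = 256 MiB.
    const MAX_WRITE_BUFFER_SIZE: u64 = 256 * 1024 * 1024;
    const FIFO_WRITE_BUFFER_SIZE: u64 = 2 * MAX_WRITE_BUFFER_SIZE;

    // Mirrors the removed set_max_table_files_size() computation.
    fn fifo_deletion_trigger(max_cf_size: u64) -> u64 {
        max_cf_size.saturating_sub(FIFO_WRITE_BUFFER_SIZE)
    }

    fn main() {
        // 150 GB per column family was the value used by the removed new_for_tests().
        assert_eq!(fifo_deletion_trigger(150_000_000_000), 149_463_129_088);
    }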
diff --git a/ledger/src/blockstore_metrics.rs b/ledger/src/blockstore_metrics.rs
index eea1ba0ce01d0b..c2752ad8c25901 100644
--- a/ledger/src/blockstore_metrics.rs
+++ b/ledger/src/blockstore_metrics.rs
@@ -347,7 +347,6 @@ impl BlockstoreRocksDbColumnFamilyMetrics {
             "blockstore_rocksdb_cfs",
             // tags that support group-by operations
             "cf_name" => cf_name,
-            "storage" => column_options.get_storage_type_string(),
             "compression" => column_options.get_compression_type_string(),
             // Size related
             (
@@ -459,7 +458,6 @@ pub(crate) fn report_rocksdb_read_perf(
             // tags that support group-by operations
             "op" => op_name,
             "cf_name" => cf_name,
-            "storage" => column_options.get_storage_type_string(),
             "compression" => column_options.get_compression_type_string(),
             // total nanos spent on the entire operation.
             ("total_op_nanos", total_op_duration.as_nanos() as i64, i64),
@@ -635,7 +633,6 @@ pub(crate) fn report_rocksdb_write_perf(
             // tags that support group-by operations
             "op" => op_name,
             "cf_name" => cf_name,
-            "storage" => column_options.get_storage_type_string(),
             "compression" => column_options.get_compression_type_string(),
             // total nanos spent on the entire operation.
             ("total_op_nanos", total_op_duration.as_nanos() as i64, i64),
diff --git a/ledger/src/blockstore_options.rs b/ledger/src/blockstore_options.rs
index fb8718a66a5e86..977323fe9c3ffc 100644
--- a/ledger/src/blockstore_options.rs
+++ b/ledger/src/blockstore_options.rs
@@ -1,7 +1,7 @@
-use {
-    rocksdb::{DBCompressionType as RocksCompressionType, DBRecoveryMode},
-    std::path::Path,
-};
+use rocksdb::{DBCompressionType as RocksCompressionType, DBRecoveryMode};
+
+/// The subdirectory under ledger directory where the Blockstore lives
+pub const BLOCKSTORE_DIRECTORY_ROCKS_LEVEL: &str = "rocksdb";

 #[derive(Debug, Clone)]
 pub struct BlockstoreOptions {
@@ -92,11 +92,8 @@ impl From<BlockstoreRecoveryMode> for DBRecoveryMode {
 /// Options for LedgerColumn.
 /// Each field might also be used as a tag that supports group-by operation when
 /// reporting metrics.
-#[derive(Debug, Clone)]
+#[derive(Default, Debug, Clone)]
 pub struct LedgerColumnOptions {
-    // Determine how to store both data and coding shreds. Default: RocksLevel.
-    pub shred_storage_type: ShredStorageType,
-
     // Determine the way to compress column families which are eligible for
     // compression.
     pub compression_type: BlockstoreCompressionType,

     // Determine how often to sample and report rocksdb read/write performance.
     pub rocks_perf_sample_interval: usize,
 }

-impl Default for LedgerColumnOptions {
-    fn default() -> Self {
-        Self {
-            shred_storage_type: ShredStorageType::RocksLevel,
-            compression_type: BlockstoreCompressionType::default(),
-            rocks_perf_sample_interval: 0,
-        }
-    }
-}
-
 impl LedgerColumnOptions {
-    pub fn get_storage_type_string(&self) -> &'static str {
-        match self.shred_storage_type {
-            ShredStorageType::RocksLevel => "rocks_level",
-            ShredStorageType::RocksFifo(_) => "rocks_fifo",
-        }
-    }
-
     pub fn get_compression_type_string(&self) -> &'static str {
         match self.compression_type {
             BlockstoreCompressionType::None => "None",
@@ -135,125 +115,6 @@ impl LedgerColumnOptions {
     }
 }

-#[derive(Debug, Clone)]
-pub enum ShredStorageType {
-    // Stores shreds under RocksDB's default compaction (level).
-    RocksLevel,
-    // (Experimental) Stores shreds under RocksDB's FIFO compaction which
-    // allows ledger store to reclaim storage more efficiently with
-    // lower I/O overhead.
-    RocksFifo(BlockstoreRocksFifoOptions),
-}
-
-impl Default for ShredStorageType {
-    fn default() -> Self {
-        Self::RocksLevel
-    }
-}
-
-pub const BLOCKSTORE_DIRECTORY_ROCKS_LEVEL: &str = "rocksdb";
-pub const BLOCKSTORE_DIRECTORY_ROCKS_FIFO: &str = "rocksdb_fifo";
-
-impl ShredStorageType {
-    /// Returns a ShredStorageType::RocksFifo, see BlockstoreRocksFifoOptions
-    /// for more details on how `max_shred_storage_size` is interpreted.
-    pub fn rocks_fifo(max_shred_storage_size: Option<u64>) -> ShredStorageType {
-        ShredStorageType::RocksFifo(BlockstoreRocksFifoOptions::new(max_shred_storage_size))
-    }
-
-    /// The directory under `ledger_path` to the underlying blockstore.
-    pub fn blockstore_directory(&self) -> &str {
-        match self {
-            ShredStorageType::RocksLevel => BLOCKSTORE_DIRECTORY_ROCKS_LEVEL,
-            ShredStorageType::RocksFifo(_) => BLOCKSTORE_DIRECTORY_ROCKS_FIFO,
-        }
-    }
-
-    /// Returns the ShredStorageType that is used under the specified
-    /// ledger_path.
-    ///
-    /// None will be returned if the ShredStorageType cannot be inferred.
-    pub fn from_ledger_path(
-        ledger_path: &Path,
-        max_fifo_shred_storage_size: Option<u64>,
-    ) -> Option<ShredStorageType> {
-        let mut result: Option<ShredStorageType> = None;
-
-        if Path::new(ledger_path)
-            .join(BLOCKSTORE_DIRECTORY_ROCKS_LEVEL)
-            .exists()
-        {
-            result = Some(ShredStorageType::RocksLevel);
-        }
-
-        if Path::new(ledger_path)
-            .join(BLOCKSTORE_DIRECTORY_ROCKS_FIFO)
-            .exists()
-        {
-            if result.is_none() {
-                result = Some(ShredStorageType::RocksFifo(
-                    BlockstoreRocksFifoOptions::new(max_fifo_shred_storage_size),
-                ));
-            } else {
-                result = None;
-            }
-        }
-        result
-    }
-}
-
-#[derive(Debug, Clone)]
-pub struct BlockstoreRocksFifoOptions {
-    // The maximum storage size for storing data shreds in column family
-    // [`cf::DataShred`]. Typically, data shreds contribute around 25% of the
-    // ledger store storage size if the RPC service is enabled, or 50% if RPC
-    // service is not enabled.
-    //
-    // Note that this number must be greater than FIFO_WRITE_BUFFER_SIZE
-    // otherwise we won't be able to write any file. If not, the blockstore
-    // will panic.
-    pub shred_data_cf_size: u64,
-    // The maximum storage size for storing coding shreds in column family
-    // [`cf::CodeShred`]. Typically, coding shreds contribute around 20% of the
-    // ledger store storage size if the RPC service is enabled, or 40% if RPC
-    // service is not enabled.
-    //
-    // Note that this number must be greater than FIFO_WRITE_BUFFER_SIZE
-    // otherwise we won't be able to write any file. If not, the blockstore
-    // will panic.
-    pub shred_code_cf_size: u64,
-}
-
-pub const MAX_ROCKS_FIFO_SHRED_STORAGE_SIZE_BYTES: u64 = u64::MAX;
-
-impl BlockstoreRocksFifoOptions {
-    /// Returns a BlockstoreRocksFifoOptions where the specified
-    /// `max_shred_storage_size` is equally split between shred_data_cf_size
-    /// and shred_code_cf_size. A `None` value for `max_shred_storage_size`
-    /// will (functionally) allow unbounded growth in these two columns. Once
-    /// a column's total size exceeds the configured value, the oldest file(s)
-    /// will be purged to get back within the limit.
-    fn new(max_shred_storage_size: Option<u64>) -> Self {
-        match max_shred_storage_size {
-            Some(size) => Self {
-                shred_data_cf_size: size / 2,
-                shred_code_cf_size: size / 2,
-            },
-            None => Self {
-                shred_data_cf_size: MAX_ROCKS_FIFO_SHRED_STORAGE_SIZE_BYTES,
-                shred_code_cf_size: MAX_ROCKS_FIFO_SHRED_STORAGE_SIZE_BYTES,
-            },
-        }
-    }
-
-    pub fn new_for_tests() -> Self {
-        Self {
-            shred_data_cf_size: 150_000_000_000,
-            shred_code_cf_size: 150_000_000_000,
-        }
-    }
-}
-
 #[derive(Debug, Clone)]
 pub enum BlockstoreCompressionType {
     None,
@@ -278,19 +139,3 @@ impl BlockstoreCompressionType {
         }
     }
 }
-
-#[test]
-fn test_rocksdb_directory() {
-    assert_eq!(
-        ShredStorageType::RocksLevel.blockstore_directory(),
-        BLOCKSTORE_DIRECTORY_ROCKS_LEVEL
-    );
-    assert_eq!(
-        ShredStorageType::RocksFifo(BlockstoreRocksFifoOptions {
-            shred_code_cf_size: 0,
-            shred_data_cf_size: 0
-        })
-        .blockstore_directory(),
-        BLOCKSTORE_DIRECTORY_ROCKS_FIFO
-    );
-}
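A standalone sketch (hypothetical helper, not part of the codebase) of the
directory probe that the removed ShredStorageType::from_ledger_path()
performed; finding both directories, or neither, meant the type could not be
inferred:

    use std::path::Path;

    #[derive(Debug, PartialEq)]
    enum InferredStorageType {
        RocksLevel,
        RocksFifo,
    }

    // Same decision table as the removed from_ledger_path(): exactly one of
    // the two blockstore directories must exist for inference to succeed.
    fn infer_storage_type(ledger_path: &Path) -> Option<InferredStorageType> {
        let level = ledger_path.join("rocksdb").exists();
        let fifo = ledger_path.join("rocksdb_fifo").exists();
        match (level, fifo) {
            (true, false) => Some(InferredStorageType::RocksLevel),
            (false, true) => Some(InferredStorageType::RocksFifo),
            // Neither (fresh ledger) or both (ambiguous) -> cannot infer.
            _ => None,
        }
    }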
diff --git a/validator/src/main.rs b/validator/src/main.rs
index 5c0105ba6c76b0..877081ae125235 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -45,7 +45,7 @@ use {
         blockstore_cleanup_service::{DEFAULT_MAX_LEDGER_SHREDS, DEFAULT_MIN_MAX_LEDGER_SHREDS},
         blockstore_options::{
             AccessType, BlockstoreCompressionType, BlockstoreOptions, BlockstoreRecoveryMode,
-            LedgerColumnOptions, ShredStorageType,
+            LedgerColumnOptions,
         },
         use_snapshot_archives_at_startup::{self, UseSnapshotArchivesAtStartup},
     },
@@ -381,20 +381,6 @@ fn set_repair_whitelist(
     Ok(())
 }

-/// Returns the default fifo shred storage size (include both data and coding
-/// shreds) based on the validator config.
-fn default_fifo_shred_storage_size(max_ledger_shreds: Option<u64>) -> Option<u64> {
-    // The max shred size is around 1228 bytes.
-    // Here we reserve a little bit more than that to give extra storage for FIFO
-    // to prevent it from purging data that have not yet being marked as obsoleted
-    // by LedgerCleanupService.
-    const RESERVED_BYTES_PER_SHRED: u64 = 1500;
-    max_ledger_shreds.map(|max_ledger_shreds| {
-        // x2 as we have data shred and coding shred.
-        max_ledger_shreds * RESERVED_BYTES_PER_SHRED * 2
-    })
-}
-
 // This function is duplicated in ledger-tool/src/main.rs...
 fn hardforks_of(matches: &ArgMatches<'_>, name: &str) -> Option<Vec<Slot>> {
     if matches.is_present(name) {
@@ -1051,33 +1037,6 @@ pub fn main() {
                     _ => panic!("Unsupported ledger_compression: {ledger_compression_string}"),
                 },
             },
-            shred_storage_type: match matches.value_of("rocksdb_shred_compaction") {
-                None => ShredStorageType::default(),
-                Some(shred_compaction_string) => match shred_compaction_string {
-                    "level" => ShredStorageType::RocksLevel,
-                    "fifo" => {
-                        warn!(
-                            "The value \"fifo\" for --rocksdb-shred-compaction has been deprecated. \
-                             Use of \"fifo\" will still work for now, but is planned for full removal \
-                             in v2.1. To update, use \"level\" for --rocksdb-shred-compaction, or \
-                             remove the --rocksdb-shred-compaction argument altogether. Note that the \
-                             entire \"rocksdb_fifo\" subdirectory within the ledger directory will \
-                             need to be manually removed once the validator is running with \"level\"."
-                        );
-                        match matches.value_of("rocksdb_fifo_shred_storage_size") {
-                            None => ShredStorageType::rocks_fifo(default_fifo_shred_storage_size(
-                                max_ledger_shreds,
-                            )),
-                            Some(_) => ShredStorageType::rocks_fifo(Some(value_t_or_exit!(
-                                matches,
-                                "rocksdb_fifo_shred_storage_size",
-                                u64
-                            ))),
-                        }
-                    }
-                    _ => panic!("Unrecognized rocksdb-shred-compaction: {shred_compaction_string}"),
-                },
-            },
             rocks_perf_sample_interval: value_t_or_exit!(
                 matches,
                 "rocksdb_perf_sample_interval",
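For reference, a worked example (illustrative only) of the arithmetic in the
removed default_fifo_shred_storage_size(); the 200-million-shred retention
target below is a hypothetical input, not a value taken from this patch:

    // Mirrors the removed helper: 1500 reserved bytes per shred, doubled to
    // cover both the data shred and coding shred column families.
    const RESERVED_BYTES_PER_SHRED: u64 = 1500;

    fn default_fifo_shred_storage_size(max_ledger_shreds: Option<u64>) -> Option<u64> {
        max_ledger_shreds.map(|n| n * RESERVED_BYTES_PER_SHRED * 2)
    }

    fn main() {
        // e.g. retaining 200_000_000 shreds implied a 600 GB fifo budget.
        assert_eq!(
            default_fifo_shred_storage_size(Some(200_000_000)),
            Some(600_000_000_000)
        );
    }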