From 35e61f9dbc0c1969cb2285818f6d571d7b1cc4d6 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 13:33:46 +0300 Subject: [PATCH 01/45] Initial rollback implementation --- bin/cli/src/commands/rollback.rs | 26 +++++- bin/cli/src/main.rs | 9 +- .../src/rollback/components/ledger_db.rs | 4 + .../src/rollback/components/mod.rs | 7 ++ .../src/rollback/components/native_db.rs | 33 ++++++++ .../src/rollback/components/state_db.rs | 83 +++++++++++++++++++ crates/storage-ops/src/rollback/mod.rs | 55 +++++++++++- crates/storage-ops/src/rollback/service.rs | 6 +- 8 files changed, 210 insertions(+), 13 deletions(-) create mode 100644 crates/storage-ops/src/rollback/components/ledger_db.rs create mode 100644 crates/storage-ops/src/rollback/components/mod.rs create mode 100644 crates/storage-ops/src/rollback/components/native_db.rs create mode 100644 crates/storage-ops/src/rollback/components/state_db.rs diff --git a/bin/cli/src/commands/rollback.rs b/bin/cli/src/commands/rollback.rs index da40db8f4e..2b1428a2e9 100644 --- a/bin/cli/src/commands/rollback.rs +++ b/bin/cli/src/commands/rollback.rs @@ -1,3 +1,27 @@ -pub(crate) async fn rollback(_num_block: u32) -> anyhow::Result<()> { +use std::path::PathBuf; +use std::sync::Arc; + +use citrea_storage_ops::rollback::Rollback; +use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; +use sov_db::native_db::NativeDB; +use sov_db::rocks_db_config::RocksdbConfig; +use sov_db::state_db::StateDB; +use tracing::info; + +pub(crate) async fn rollback(db_path: PathBuf, num_blocks: u64) -> anyhow::Result<()> { + info!( + "Rolling back DB at {} {} down", + db_path.display(), + num_blocks + ); + + let rocksdb_config = RocksdbConfig::new(&db_path, None, None); + let ledger_db = LedgerDB::with_config(&rocksdb_config)?; + let native_db = NativeDB::setup_schema_db(&rocksdb_config)?; + let state_db = StateDB::setup_schema_db(&rocksdb_config)?; + + let rollback = Rollback::new(ledger_db.inner(), Arc::new(state_db), Arc::new(native_db)); 
+ rollback.execute(num_blocks).await?; + Ok(()) } diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index 77bdd7d6af..a1d87338e6 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -56,7 +56,7 @@ enum Commands { db_path: PathBuf, /// The number of blocks to rollback #[arg(long)] - blocks: u32, + blocks: u64, }, /// Backup DBs Backup { @@ -88,11 +88,8 @@ async fn main() -> anyhow::Result<()> { } => { commands::prune(node_type, db_path.clone(), distance).await?; } - Commands::Rollback { - db_path: _db_path, - blocks, - } => { - commands::rollback(blocks).await?; + Commands::Rollback { db_path, blocks } => { + commands::rollback(db_path.clone(), blocks).await?; } Commands::Backup { db_path, diff --git a/crates/storage-ops/src/rollback/components/ledger_db.rs b/crates/storage-ops/src/rollback/components/ledger_db.rs new file mode 100644 index 0000000000..d3bff6cb70 --- /dev/null +++ b/crates/storage-ops/src/rollback/components/ledger_db.rs @@ -0,0 +1,4 @@ +use std::sync::Arc; + +/// Rollback native DB +pub(crate) fn rollback_ledger_db(_ledger_db: Arc, _down_to_block: u64) {} diff --git a/crates/storage-ops/src/rollback/components/mod.rs b/crates/storage-ops/src/rollback/components/mod.rs new file mode 100644 index 0000000000..f1decd0dd2 --- /dev/null +++ b/crates/storage-ops/src/rollback/components/mod.rs @@ -0,0 +1,7 @@ +mod ledger_db; +mod native_db; +mod state_db; + +pub(crate) use ledger_db::*; +pub(crate) use native_db::*; +pub(crate) use state_db::*; diff --git a/crates/storage-ops/src/rollback/components/native_db.rs b/crates/storage-ops/src/rollback/components/native_db.rs new file mode 100644 index 0000000000..70f15e319c --- /dev/null +++ b/crates/storage-ops/src/rollback/components/native_db.rs @@ -0,0 +1,33 @@ +use std::sync::Arc; + +use sov_db::schema::tables::ModuleAccessoryState; +use tracing::{debug, error}; + +/// Rollback native DB +pub(crate) fn rollback_native_db(native_db: Arc, down_to_block: u64) { + debug!("Rolling back native DB, 
down to L2 block {}", down_to_block); + + let Ok(mut iter) = native_db.iter::() else { + return; + }; + + iter.seek_to_last(); + + let mut counter = 0u32; + let mut keys_to_delete = vec![]; + while let Some(Ok(entry)) = iter.next() { + let version = entry.key.1; + // The version value is always ahead of block number by one. + if version >= down_to_block + 1 { + keys_to_delete.push(entry.key); + counter += 1; + } + } + + if let Err(e) = native_db.delete_batch::(keys_to_delete) { + error!("Failed to delete native DB entry {:?}", e); + return; + } + + debug!("Rolled back {} native DB records", counter); +} diff --git a/crates/storage-ops/src/rollback/components/state_db.rs b/crates/storage-ops/src/rollback/components/state_db.rs new file mode 100644 index 0000000000..a918f10353 --- /dev/null +++ b/crates/storage-ops/src/rollback/components/state_db.rs @@ -0,0 +1,83 @@ +use std::sync::Arc; + +use jmt::storage::Node; +use sov_db::schema::tables::{JmtNodes, JmtValues, KeyHashToKey}; +use sov_schema_db::SchemaBatch; +use tracing::{error, info}; + +/// Rollback state DB +#[allow(dead_code)] +pub(crate) fn rollback_state_db(state_db: Arc, down_to_block: u64) { + info!("Rolling back state DB, down to L2 block {}", down_to_block); + + let to_version = down_to_block + 1; + + let mut indices = state_db + .iter::() + .expect("Tried to rollback state DB but could not obtain an iterator"); + + indices.seek_to_last(); + + let mut deletions = 0; + + let mut batch = SchemaBatch::new(); + for index in indices { + let Ok(index) = index else { + continue; + }; + + let node_key = index.key; + let node = index.value; + + // Exit loop if we go down below the target block + if node_key.version() < to_version { + break; + } + + let key_hash = match node { + Node::Null => continue, + Node::Internal(_) => { + if let Err(e) = batch.delete::(&node_key) { + error!( + "Could not add JMT node deletion to schema batch operation: {:?}", + e + ); + } + + deletions += 1; + continue; + } + 
Node::Leaf(leaf) => leaf.key_hash(), + }; + + let key_preimage = match state_db.get::(&key_hash.0) { + Ok(Some(key)) => key, + _ => { + error!("Could not read key from key hash"); + continue; + } + }; + + if let Err(e) = batch.delete::(&(key_preimage, node_key.version())) { + error!( + "Could not add JMT value deletion to schema batch operation: {:?}", + e + ); + } + + if let Err(e) = batch.delete::(&node_key) { + error!( + "Could not add JMT node deletion to schema batch operation: {:?}", + e + ); + } + + deletions += 2; + } + + if let Err(e) = state_db.write_schemas(batch) { + error!("Could not delete state data: {:?}", e); + } + + info!("Rolled back {} records from state DB", deletions); +} diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index ee48880323..4d595ddb77 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -1,11 +1,60 @@ +use std::sync::Arc; + +use components::{rollback_ledger_db, rollback_native_db, rollback_state_db}; +use futures::future; +use tracing::info; + +mod components; pub mod service; -pub struct Rollback {} +pub struct Rollback { + /// Access to ledger tables. + ledger_db: Arc, + /// Access to native DB. + native_db: Arc, + /// Access to state DB. + state_db: Arc, +} impl Rollback { + pub fn new( + ledger_db: Arc, + state_db: Arc, + native_db: Arc, + ) -> Self { + // distance is the only criteria implemented at the moment. 
+ Self { + ledger_db, + state_db, + native_db, + } + } + /// Rollback the provided number of blocks - pub fn execute(&self, _num_blocks: u32) -> anyhow::Result<()> { - // Do something + pub async fn execute(&self, num_blocks: u64) -> anyhow::Result<()> { + info!("Rolling back by {} blocks", num_blocks); + + let ledger_db = self.ledger_db.clone(); + let native_db = self.native_db.clone(); + let state_db = self.state_db.clone(); + + let up_to_block = 0; + + let ledger_pruning_handle = + tokio::task::spawn_blocking(move || rollback_ledger_db(ledger_db, num_blocks)); + + let state_db_pruning_handle = + tokio::task::spawn_blocking(move || rollback_state_db(state_db, up_to_block)); + + let native_db_pruning_handle = + tokio::task::spawn_blocking(move || rollback_native_db(native_db, up_to_block)); + + future::join_all([ + ledger_pruning_handle, + state_db_pruning_handle, + native_db_pruning_handle, + ]) + .await; Ok(()) } diff --git a/crates/storage-ops/src/rollback/service.rs b/crates/storage-ops/src/rollback/service.rs index 2fbd05e6c8..fccbe5212a 100644 --- a/crates/storage-ops/src/rollback/service.rs +++ b/crates/storage-ops/src/rollback/service.rs @@ -7,11 +7,11 @@ use super::Rollback; pub struct RollbackService { rollback: Rollback, - receiver: Receiver, + receiver: Receiver, } impl RollbackService { - pub fn new(rollback: Rollback, receiver: Receiver) -> Self { + pub fn new(rollback: Rollback, receiver: Receiver) -> Self { Self { rollback, receiver } } @@ -25,7 +25,7 @@ impl RollbackService { }, Some(num_blocks) = self.receiver.recv() => { info!("Received signal to rollback {num_blocks} blocks"); - if let Err(e) = self.rollback.execute(num_blocks) { + if let Err(e) = self.rollback.execute(num_blocks).await { panic!("Could not rollback blocks: {:?}", e); } } From 549125026649810a5068fc6b82980c628869af13 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 13:44:04 +0300 Subject: [PATCH 02/45] Calculate down_to_block properly --- 
bin/cli/src/commands/rollback.rs | 8 +++++++- crates/storage-ops/src/rollback/mod.rs | 10 +++++----- crates/storage-ops/src/rollback/service.rs | 8 ++++---- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/bin/cli/src/commands/rollback.rs b/bin/cli/src/commands/rollback.rs index 2b1428a2e9..2096ad1021 100644 --- a/bin/cli/src/commands/rollback.rs +++ b/bin/cli/src/commands/rollback.rs @@ -20,8 +20,14 @@ pub(crate) async fn rollback(db_path: PathBuf, num_blocks: u64) -> anyhow::Resul let native_db = NativeDB::setup_schema_db(&rocksdb_config)?; let state_db = StateDB::setup_schema_db(&rocksdb_config)?; + let Some(soft_confirmation_number) = ledger_db.get_head_soft_confirmation_height()? else { + return Ok(()); + }; + let rollback = Rollback::new(ledger_db.inner(), Arc::new(state_db), Arc::new(native_db)); - rollback.execute(num_blocks).await?; + rollback + .execute(soft_confirmation_number, num_blocks) + .await?; Ok(()) } diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index 4d595ddb77..a7d81b98db 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -31,23 +31,23 @@ impl Rollback { } /// Rollback the provided number of blocks - pub async fn execute(&self, num_blocks: u64) -> anyhow::Result<()> { + pub async fn execute(&self, current_l2_height: u64, num_blocks: u64) -> anyhow::Result<()> { info!("Rolling back by {} blocks", num_blocks); let ledger_db = self.ledger_db.clone(); let native_db = self.native_db.clone(); let state_db = self.state_db.clone(); - let up_to_block = 0; + let down_to_block = current_l2_height - num_blocks + 1; let ledger_pruning_handle = - tokio::task::spawn_blocking(move || rollback_ledger_db(ledger_db, num_blocks)); + tokio::task::spawn_blocking(move || rollback_ledger_db(ledger_db, down_to_block)); let state_db_pruning_handle = - tokio::task::spawn_blocking(move || rollback_state_db(state_db, up_to_block)); + 
tokio::task::spawn_blocking(move || rollback_state_db(state_db, down_to_block)); let native_db_pruning_handle = - tokio::task::spawn_blocking(move || rollback_native_db(native_db, up_to_block)); + tokio::task::spawn_blocking(move || rollback_native_db(native_db, down_to_block)); future::join_all([ ledger_pruning_handle, diff --git a/crates/storage-ops/src/rollback/service.rs b/crates/storage-ops/src/rollback/service.rs index fccbe5212a..ba757d7534 100644 --- a/crates/storage-ops/src/rollback/service.rs +++ b/crates/storage-ops/src/rollback/service.rs @@ -7,11 +7,11 @@ use super::Rollback; pub struct RollbackService { rollback: Rollback, - receiver: Receiver, + receiver: Receiver<(u64, u64)>, } impl RollbackService { - pub fn new(rollback: Rollback, receiver: Receiver) -> Self { + pub fn new(rollback: Rollback, receiver: Receiver<(u64, u64)>) -> Self { Self { rollback, receiver } } @@ -23,9 +23,9 @@ impl RollbackService { _ = cancellation_token.cancelled() => { return; }, - Some(num_blocks) = self.receiver.recv() => { + Some((current_l2_height, num_blocks)) = self.receiver.recv() => { info!("Received signal to rollback {num_blocks} blocks"); - if let Err(e) = self.rollback.execute(num_blocks).await { + if let Err(e) = self.rollback.execute(current_l2_height, num_blocks).await { panic!("Could not rollback blocks: {:?}", e); } } From f43d5c45f4c1560606d518323a074e325f463048 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 13:49:07 +0300 Subject: [PATCH 03/45] Rename handles --- crates/storage-ops/src/rollback/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index a7d81b98db..b25e806d0e 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -40,19 +40,19 @@ impl Rollback { let down_to_block = current_l2_height - num_blocks + 1; - let ledger_pruning_handle = + let ledger_rollback_handle = 
tokio::task::spawn_blocking(move || rollback_ledger_db(ledger_db, down_to_block)); - let state_db_pruning_handle = + let state_db_rollback_handle = tokio::task::spawn_blocking(move || rollback_state_db(state_db, down_to_block)); - let native_db_pruning_handle = + let native_db_rollback_handle = tokio::task::spawn_blocking(move || rollback_native_db(native_db, down_to_block)); future::join_all([ - ledger_pruning_handle, - state_db_pruning_handle, - native_db_pruning_handle, + ledger_rollback_handle, + state_db_rollback_handle, + native_db_rollback_handle, ]) .await; From 2a9269d392985e14ca7734e68e676d191b35bb2f Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 13:49:34 +0300 Subject: [PATCH 04/45] Remove dead code marker --- crates/storage-ops/src/rollback/components/state_db.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/storage-ops/src/rollback/components/state_db.rs b/crates/storage-ops/src/rollback/components/state_db.rs index a918f10353..4cd061afea 100644 --- a/crates/storage-ops/src/rollback/components/state_db.rs +++ b/crates/storage-ops/src/rollback/components/state_db.rs @@ -6,7 +6,6 @@ use sov_schema_db::SchemaBatch; use tracing::{error, info}; /// Rollback state DB -#[allow(dead_code)] pub(crate) fn rollback_state_db(state_db: Arc, down_to_block: u64) { info!("Rolling back state DB, down to L2 block {}", down_to_block); From 84b069c46ae2c4d48ab7b945b54aa92a1f1f46ac Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 14:32:26 +0300 Subject: [PATCH 05/45] Rename PruningNodeType to StorageNodeType --- bin/citrea/src/main.rs | 4 +-- bin/citrea/tests/common/helpers.rs | 4 +-- bin/cli/src/commands/prune.rs | 28 +++++++++---------- bin/cli/src/main.rs | 4 +-- .../src/pruning/components/ledger_db/mod.rs | 12 ++++---- .../src/pruning/components/ledger_db/slots.rs | 12 ++++---- .../ledger_db/soft_confirmations.rs | 4 +-- crates/storage-ops/src/pruning/mod.rs | 4 +-- crates/storage-ops/src/pruning/service.rs | 4 +-- 
crates/storage-ops/src/pruning/types.rs | 2 +- crates/storage-ops/src/tests.rs | 14 +++++----- 11 files changed, 46 insertions(+), 46 deletions(-) diff --git a/bin/citrea/src/main.rs b/bin/citrea/src/main.rs index c12a1510d3..20f9f3a8b6 100644 --- a/bin/citrea/src/main.rs +++ b/bin/citrea/src/main.rs @@ -15,7 +15,7 @@ use citrea_common::{from_toml_path, FromEnv, FullNodeConfig}; use citrea_light_client_prover::da_block_handler::StartVariant; use citrea_stf::genesis_config::GenesisPaths; use citrea_stf::runtime::{CitreaRuntime, DefaultContext}; -use citrea_storage_ops::pruning::types::PruningNodeType; +use citrea_storage_ops::pruning::types::StorageNodeType; use clap::Parser; use metrics_exporter_prometheus::PrometheusBuilder; use metrics_util::MetricKindMask; @@ -369,7 +369,7 @@ where if let Some(pruner_service) = pruner_service { task_manager.spawn(|cancellation_token| async move { pruner_service - .run(PruningNodeType::FullNode, cancellation_token) + .run(StorageNodeType::FullNode, cancellation_token) .await }); } diff --git a/bin/citrea/tests/common/helpers.rs b/bin/citrea/tests/common/helpers.rs index 125c785fa0..2dd84f58d5 100644 --- a/bin/citrea/tests/common/helpers.rs +++ b/bin/citrea/tests/common/helpers.rs @@ -17,7 +17,7 @@ use citrea_common::{ use citrea_light_client_prover::da_block_handler::StartVariant; use citrea_primitives::TEST_PRIVATE_KEY; use citrea_stf::genesis_config::GenesisPaths; -use citrea_storage_ops::pruning::types::PruningNodeType; +use citrea_storage_ops::pruning::types::StorageNodeType; use citrea_storage_ops::pruning::PruningConfig; use sov_db::ledger_db::SharedLedgerOps; use sov_db::rocks_db_config::RocksdbConfig; @@ -338,7 +338,7 @@ pub async fn start_rollup( if let Some(pruner) = pruner { task_manager.spawn(|cancellation_token| async move { pruner - .run(PruningNodeType::FullNode, cancellation_token) + .run(StorageNodeType::FullNode, cancellation_token) .await }); } diff --git a/bin/cli/src/commands/prune.rs 
b/bin/cli/src/commands/prune.rs index 60498053b7..e539c00eb2 100644 --- a/bin/cli/src/commands/prune.rs +++ b/bin/cli/src/commands/prune.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use std::sync::Arc; -use citrea_storage_ops::pruning::types::PruningNodeType; +use citrea_storage_ops::pruning::types::StorageNodeType; use citrea_storage_ops::pruning::{Pruner, PruningConfig}; use clap::ValueEnum; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; @@ -15,26 +15,26 @@ use sov_db::state_db::StateDB; use tracing::{debug, info}; #[derive(Copy, Clone, ValueEnum)] -pub enum PruningNodeTypeArg { +pub enum StorageNodeTypeArg { Sequencer, FullNode, BatchProver, LightClient, } -impl From for PruningNodeType { - fn from(value: PruningNodeTypeArg) -> Self { +impl From for StorageNodeType { + fn from(value: StorageNodeTypeArg) -> Self { match value { - PruningNodeTypeArg::Sequencer => PruningNodeType::Sequencer, - PruningNodeTypeArg::FullNode => PruningNodeType::FullNode, - PruningNodeTypeArg::BatchProver => PruningNodeType::BatchProver, - PruningNodeTypeArg::LightClient => PruningNodeType::LightClient, + StorageNodeTypeArg::Sequencer => StorageNodeType::Sequencer, + StorageNodeTypeArg::FullNode => StorageNodeType::FullNode, + StorageNodeTypeArg::BatchProver => StorageNodeType::BatchProver, + StorageNodeTypeArg::LightClient => StorageNodeType::LightClient, } } } pub(crate) async fn prune( - node_type: PruningNodeTypeArg, + node_type: StorageNodeTypeArg, db_path: PathBuf, distance: u64, ) -> anyhow::Result<()> { @@ -76,12 +76,12 @@ pub(crate) async fn prune( Ok(()) } -fn cfs_from_node_type(node_type: PruningNodeTypeArg) -> Vec { +fn cfs_from_node_type(node_type: StorageNodeTypeArg) -> Vec { let cfs = match node_type { - PruningNodeTypeArg::Sequencer => SEQUENCER_LEDGER_TABLES, - PruningNodeTypeArg::FullNode => FULL_NODE_LEDGER_TABLES, - PruningNodeTypeArg::BatchProver => BATCH_PROVER_LEDGER_TABLES, - PruningNodeTypeArg::LightClient => LIGHT_CLIENT_PROVER_LEDGER_TABLES, + 
StorageNodeTypeArg::Sequencer => SEQUENCER_LEDGER_TABLES, + StorageNodeTypeArg::FullNode => FULL_NODE_LEDGER_TABLES, + StorageNodeTypeArg::BatchProver => BATCH_PROVER_LEDGER_TABLES, + StorageNodeTypeArg::LightClient => LIGHT_CLIENT_PROVER_LEDGER_TABLES, }; cfs.iter().map(|x| x.to_string()).collect::>() diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index a1d87338e6..1c1b950ff8 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use clap::{Parser, Subcommand, ValueEnum}; -use commands::PruningNodeTypeArg; +use commands::StorageNodeTypeArg; use tracing_subscriber::fmt; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; @@ -41,7 +41,7 @@ enum Commands { /// Prune old DB entries Prune { #[arg(long)] - node_type: PruningNodeTypeArg, + node_type: StorageNodeTypeArg, /// The path of the database to prune #[arg(long)] db_path: PathBuf, diff --git a/crates/storage-ops/src/pruning/components/ledger_db/mod.rs b/crates/storage-ops/src/pruning/components/ledger_db/mod.rs index cc309e85f2..08bbb631e8 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/mod.rs @@ -5,7 +5,7 @@ use tracing::{debug, error}; use self::slots::prune_slots; use self::soft_confirmations::prune_soft_confirmations; -use crate::pruning::types::PruningNodeType; +use crate::pruning::types::StorageNodeType; mod slots; mod soft_confirmations; @@ -28,32 +28,32 @@ macro_rules! 
log_result_or_error { } /// Prune ledger -pub(crate) fn prune_ledger(node_type: PruningNodeType, ledger_db: Arc, up_to_block: u64) { +pub(crate) fn prune_ledger(node_type: StorageNodeType, ledger_db: Arc, up_to_block: u64) { debug!("Pruning Ledger, up to L2 block {}", up_to_block); match node_type { - PruningNodeType::Sequencer => { + StorageNodeType::Sequencer => { log_result_or_error!( "soft_confirmations", prune_soft_confirmations(node_type, &ledger_db, up_to_block) ); log_result_or_error!("slots", prune_slots(node_type, &ledger_db, up_to_block)); } - PruningNodeType::FullNode => { + StorageNodeType::FullNode => { log_result_or_error!( "soft_confirmations", prune_soft_confirmations(node_type, &ledger_db, up_to_block) ); log_result_or_error!("slots", prune_slots(node_type, &ledger_db, up_to_block)); } - PruningNodeType::BatchProver => { + StorageNodeType::BatchProver => { log_result_or_error!( "soft_confirmations", prune_soft_confirmations(node_type, &ledger_db, up_to_block) ); log_result_or_error!("slots", prune_slots(node_type, &ledger_db, up_to_block)); } - PruningNodeType::LightClient => { + StorageNodeType::LightClient => { log_result_or_error!( "soft_confirmations", prune_soft_confirmations(node_type, &ledger_db, up_to_block) diff --git a/crates/storage-ops/src/pruning/components/ledger_db/slots.rs b/crates/storage-ops/src/pruning/components/ledger_db/slots.rs index 0ab8898cbd..e70990a6f7 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/slots.rs @@ -5,10 +5,10 @@ use sov_db::schema::tables::{ use sov_db::schema::types::{SlotNumber, SoftConfirmationNumber}; use sov_schema_db::{ScanDirection, DB}; -use crate::pruning::types::PruningNodeType; +use crate::pruning::types::StorageNodeType; pub(crate) fn prune_slots( - node_type: PruningNodeType, + node_type: StorageNodeType, ledger_db: &DB, up_to_block: u64, ) -> anyhow::Result { @@ -31,20 +31,20 @@ pub(crate) fn prune_slots( 
ledger_db.delete::(&slot_height)?; ledger_db.delete::(&slot_height)?; - if !matches!(node_type, PruningNodeType::Sequencer) { + if !matches!(node_type, StorageNodeType::Sequencer) { prune_slot_by_hash(ledger_db, slot_height)?; } - if matches!(node_type, PruningNodeType::FullNode) { + if matches!(node_type, StorageNodeType::FullNode) { ledger_db.delete::(&slot_height)?; } - if matches!(node_type, PruningNodeType::BatchProver) { + if matches!(node_type, StorageNodeType::BatchProver) { ledger_db.delete::(&slot_height)?; ledger_db.delete::(&slot_height)?; } - if matches!(node_type, PruningNodeType::LightClient) { + if matches!(node_type, StorageNodeType::LightClient) { ledger_db.delete::(&slot_height)?; } diff --git a/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs b/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs index a2e1bf51b5..5b24e98c2d 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs @@ -5,10 +5,10 @@ use sov_db::schema::tables::{ use sov_db::schema::types::SoftConfirmationNumber; use sov_schema_db::{ScanDirection, DB}; -use crate::pruning::types::PruningNodeType; +use crate::pruning::types::StorageNodeType; pub(crate) fn prune_soft_confirmations( - node_type: PruningNodeType, + node_type: StorageNodeType, ledger_db: &DB, up_to_block: u64, ) -> anyhow::Result { diff --git a/crates/storage-ops/src/pruning/mod.rs b/crates/storage-ops/src/pruning/mod.rs index 17e634b45a..916ac4fbe7 100644 --- a/crates/storage-ops/src/pruning/mod.rs +++ b/crates/storage-ops/src/pruning/mod.rs @@ -4,7 +4,7 @@ use futures::future; use serde::{Deserialize, Serialize}; use sov_db::schema::tables::LastPrunedBlock; use tracing::info; -use types::PruningNodeType; +use types::StorageNodeType; use self::components::{prune_ledger, prune_native_db}; use self::criteria::{Criteria, DistanceCriteria}; @@ -70,7 +70,7 @@ impl 
Pruner { } /// Prune everything - pub async fn prune(&self, node_type: PruningNodeType, up_to_block: u64) { + pub async fn prune(&self, node_type: StorageNodeType, up_to_block: u64) { info!("Pruning up to L2 block: {}", up_to_block); let ledger_db = self.ledger_db.clone(); diff --git a/crates/storage-ops/src/pruning/service.rs b/crates/storage-ops/src/pruning/service.rs index 65aec00c8f..a4d1691d59 100644 --- a/crates/storage-ops/src/pruning/service.rs +++ b/crates/storage-ops/src/pruning/service.rs @@ -3,7 +3,7 @@ use tokio::sync::broadcast; use tokio_util::sync::CancellationToken; use tracing::{debug, error}; -use super::types::PruningNodeType; +use super::types::StorageNodeType; use super::Pruner; pub struct PrunerService { @@ -27,7 +27,7 @@ impl PrunerService { } } - pub async fn run(mut self, node_type: PruningNodeType, cancellation_token: CancellationToken) { + pub async fn run(mut self, node_type: StorageNodeType, cancellation_token: CancellationToken) { loop { select! { biased; diff --git a/crates/storage-ops/src/pruning/types.rs b/crates/storage-ops/src/pruning/types.rs index 1213a4a453..68f802e372 100644 --- a/crates/storage-ops/src/pruning/types.rs +++ b/crates/storage-ops/src/pruning/types.rs @@ -1,5 +1,5 @@ #[derive(Copy, Clone)] -pub enum PruningNodeType { +pub enum StorageNodeType { Sequencer, FullNode, BatchProver, diff --git a/crates/storage-ops/src/tests.rs b/crates/storage-ops/src/tests.rs index 20ef291775..602128bb41 100644 --- a/crates/storage-ops/src/tests.rs +++ b/crates/storage-ops/src/tests.rs @@ -23,7 +23,7 @@ use tokio_util::sync::CancellationToken; use crate::pruning::components::prune_ledger; use crate::pruning::criteria::{Criteria, DistanceCriteria}; -use crate::pruning::types::PruningNodeType; +use crate::pruning::types::StorageNodeType; use crate::pruning::{Pruner, PrunerService, PruningConfig}; #[tokio::test(flavor = "multi_thread")] @@ -45,7 +45,7 @@ async fn test_pruning_simple_run() { ); let pruner_service = 
PrunerService::new(pruner, 0, receiver); - tokio::spawn(pruner_service.run(PruningNodeType::Sequencer, cancellation_token.clone())); + tokio::spawn(pruner_service.run(StorageNodeType::Sequencer, cancellation_token.clone())); sleep(Duration::from_secs(1)); @@ -156,7 +156,7 @@ pub fn test_pruning_ledger_db_soft_confirmations() { .unwrap() .is_some()); - prune_ledger(PruningNodeType::Sequencer, ledger_db.clone(), 10); + prune_ledger(StorageNodeType::Sequencer, ledger_db.clone(), 10); // Pruned assert!(ledger_db @@ -307,7 +307,7 @@ pub fn test_pruning_ledger_db_batch_prover_soft_confirmations() { .unwrap() .is_some()); - prune_ledger(PruningNodeType::BatchProver, ledger_db.clone(), 10); + prune_ledger(StorageNodeType::BatchProver, ledger_db.clone(), 10); // Pruned assert!(ledger_db @@ -491,7 +491,7 @@ pub fn test_pruning_ledger_db_fullnode_slots() { prepare_slots_data(&ledger_db); - prune_ledger(PruningNodeType::FullNode, ledger_db.clone(), 10); + prune_ledger(StorageNodeType::FullNode, ledger_db.clone(), 10); // SHOULD NOT CHANGE assert!(ledger_db @@ -582,7 +582,7 @@ pub fn test_pruning_ledger_db_light_client_slots() { prepare_slots_data(&ledger_db); - prune_ledger(PruningNodeType::LightClient, ledger_db.clone(), 10); + prune_ledger(StorageNodeType::LightClient, ledger_db.clone(), 10); // SHOULD NOT CHANGE assert!(ledger_db @@ -673,7 +673,7 @@ pub fn test_pruning_ledger_db_batch_prover_slots() { prepare_slots_data(&ledger_db); - prune_ledger(PruningNodeType::BatchProver, ledger_db.clone(), 10); + prune_ledger(StorageNodeType::BatchProver, ledger_db.clone(), 10); // SHOULD NOT CHANGE assert!(ledger_db From c5575995d79822a6c66ca6128cbc9868b78b32a4 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 14:58:23 +0300 Subject: [PATCH 06/45] Move macros to be used by other modules --- crates/storage-ops/src/lib.rs | 1 + crates/storage-ops/src/macros.rs | 18 ++++++++++++++++++ .../src/pruning/components/ledger_db/mod.rs | 18 +----------------- 3 files changed, 
20 insertions(+), 17 deletions(-) create mode 100644 crates/storage-ops/src/macros.rs diff --git a/crates/storage-ops/src/lib.rs b/crates/storage-ops/src/lib.rs index 3f835e181e..bfef6f343a 100644 --- a/crates/storage-ops/src/lib.rs +++ b/crates/storage-ops/src/lib.rs @@ -1,5 +1,6 @@ pub mod pruning; pub mod rollback; +pub(crate) mod macros; #[cfg(test)] mod tests; diff --git a/crates/storage-ops/src/macros.rs b/crates/storage-ops/src/macros.rs new file mode 100644 index 0000000000..3bd4512e79 --- /dev/null +++ b/crates/storage-ops/src/macros.rs @@ -0,0 +1,18 @@ +#[macro_export] +macro_rules! log_result_or_error { + ($tables_group:literal, $call:expr) => {{ + match $call { + Ok(result) => { + tracing::debug!("Deleted {} records from {} group", result, $tables_group); + } + Err(e) => { + tracing::error!( + "Failed to prune ledger's {} table group: {:?}", + $tables_group, + e + ); + return; + } + } + }}; +} diff --git a/crates/storage-ops/src/pruning/components/ledger_db/mod.rs b/crates/storage-ops/src/pruning/components/ledger_db/mod.rs index 08bbb631e8..b10e497b02 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/mod.rs @@ -5,28 +5,12 @@ use tracing::{debug, error}; use self::slots::prune_slots; use self::soft_confirmations::prune_soft_confirmations; +use crate::log_result_or_error; use crate::pruning::types::StorageNodeType; mod slots; mod soft_confirmations; -macro_rules! 
log_result_or_error { - ($tables_group:literal, $call:expr) => {{ - match $call { - Ok(result) => { - debug!("Deleted {} records from {} group", result, $tables_group); - } - Err(e) => { - error!( - "Failed to prune ledger's {} table group: {:?}", - $tables_group, e - ); - return; - } - } - }}; -} - /// Prune ledger pub(crate) fn prune_ledger(node_type: StorageNodeType, ledger_db: Arc, up_to_block: u64) { debug!("Pruning Ledger, up to L2 block {}", up_to_block); From d7fde2ea43b701fbbd6932f3178f17882d81a7f9 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 14:59:21 +0300 Subject: [PATCH 07/45] Generalize ledger pruning --- crates/storage-ops/src/lib.rs | 1 + .../src/pruning/components/ledger_db/mod.rs | 6 +- .../src/pruning/components/ledger_db/slots.rs | 45 +---------- .../ledger_db/soft_confirmations.rs | 25 ++---- .../src/rollback/components/ledger_db.rs | 4 - .../src/rollback/components/ledger_db/mod.rs | 63 +++++++++++++++ .../rollback/components/ledger_db/slots.rs | 36 +++++++++ .../ledger_db/soft_confirmations.rs | 42 ++++++++++ crates/storage-ops/src/rollback/mod.rs | 14 +++- crates/storage-ops/src/rollback/service.rs | 5 +- crates/storage-ops/src/utils.rs | 78 +++++++++++++++++++ 11 files changed, 249 insertions(+), 70 deletions(-) delete mode 100644 crates/storage-ops/src/rollback/components/ledger_db.rs create mode 100644 crates/storage-ops/src/rollback/components/ledger_db/mod.rs create mode 100644 crates/storage-ops/src/rollback/components/ledger_db/slots.rs create mode 100644 crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs create mode 100644 crates/storage-ops/src/utils.rs diff --git a/crates/storage-ops/src/lib.rs b/crates/storage-ops/src/lib.rs index bfef6f343a..79f0997396 100644 --- a/crates/storage-ops/src/lib.rs +++ b/crates/storage-ops/src/lib.rs @@ -4,3 +4,4 @@ pub mod rollback; pub(crate) mod macros; #[cfg(test)] mod tests; +pub(crate) mod utils; diff --git 
a/crates/storage-ops/src/pruning/components/ledger_db/mod.rs b/crates/storage-ops/src/pruning/components/ledger_db/mod.rs index b10e497b02..2321e572bd 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/mod.rs @@ -1,10 +1,10 @@ use std::sync::Arc; +use slots::prune_slots; +use soft_confirmations::prune_soft_confirmations; use sov_schema_db::DB; -use tracing::{debug, error}; +use tracing::debug; -use self::slots::prune_slots; -use self::soft_confirmations::prune_soft_confirmations; use crate::log_result_or_error; use crate::pruning::types::StorageNodeType; diff --git a/crates/storage-ops/src/pruning/components/ledger_db/slots.rs b/crates/storage-ops/src/pruning/components/ledger_db/slots.rs index e70990a6f7..c3e9778ae6 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/slots.rs @@ -1,11 +1,9 @@ -use sov_db::schema::tables::{ - CommitmentsByNumber, L2RangeByL1Height, LightClientProofBySlotNumber, ProofsBySlotNumber, - ProofsBySlotNumberV2, SlotByHash, VerifiedBatchProofsBySlotNumber, -}; -use sov_db::schema::types::{SlotNumber, SoftConfirmationNumber}; +use sov_db::schema::tables::L2RangeByL1Height; +use sov_db::schema::types::SoftConfirmationNumber; use sov_schema_db::{ScanDirection, DB}; use crate::pruning::types::StorageNodeType; +use crate::utils::delete_slots_by_number; pub(crate) fn prune_slots( node_type: StorageNodeType, @@ -28,46 +26,11 @@ pub(crate) fn prune_slots( if slot_range.1 > SoftConfirmationNumber(up_to_block) { break; } - ledger_db.delete::(&slot_height)?; - ledger_db.delete::(&slot_height)?; - if !matches!(node_type, StorageNodeType::Sequencer) { - prune_slot_by_hash(ledger_db, slot_height)?; - } - - if matches!(node_type, StorageNodeType::FullNode) { - ledger_db.delete::(&slot_height)?; - } - - if matches!(node_type, StorageNodeType::BatchProver) { - ledger_db.delete::(&slot_height)?; - 
ledger_db.delete::(&slot_height)?; - } - - if matches!(node_type, StorageNodeType::LightClient) { - ledger_db.delete::(&slot_height)?; - } + delete_slots_by_number(node_type, ledger_db, slot_height)?; deleted += 1; } Ok(deleted) } - -fn prune_slot_by_hash(ledger_db: &DB, slot_height: SlotNumber) -> anyhow::Result<()> { - let mut slots = - ledger_db.iter_with_direction::(Default::default(), ScanDirection::Forward)?; - slots.seek_to_first(); - - for record in slots { - let Ok(record) = record else { - continue; - }; - - if record.value < slot_height { - ledger_db.delete::(&record.key)?; - } - } - - Ok(()) -} diff --git a/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs b/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs index 5b24e98c2d..3ec596b74c 100644 --- a/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs +++ b/crates/storage-ops/src/pruning/components/ledger_db/soft_confirmations.rs @@ -1,11 +1,9 @@ -use sov_db::schema::tables::{ - L2Witness, ProverStateDiffs, SoftConfirmationByHash, SoftConfirmationByNumber, - SoftConfirmationStatus, -}; +use sov_db::schema::tables::SoftConfirmationByNumber; use sov_db::schema::types::SoftConfirmationNumber; use sov_schema_db::{ScanDirection, DB}; use crate::pruning::types::StorageNodeType; +use crate::utils::delete_soft_confirmations_by_number; pub(crate) fn prune_soft_confirmations( node_type: StorageNodeType, @@ -29,20 +27,13 @@ pub(crate) fn prune_soft_confirmations( if soft_confirmation_number > SoftConfirmationNumber(up_to_block) { break; } - ledger_db.delete::(&soft_confirmation_number)?; - if matches!(node_type, PruningNodeType::LightClient) { - continue; - } - - let soft_confirmation = record.value; - ledger_db.delete::(&soft_confirmation.hash)?; - ledger_db.delete::(&soft_confirmation_number)?; - - if matches!(node_type, PruningNodeType::BatchProver) { - ledger_db.delete::(&soft_confirmation_number)?; - 
ledger_db.delete::(&soft_confirmation_number)?; - } + delete_soft_confirmations_by_number( + node_type, + ledger_db, + soft_confirmation_number, + record.value.hash, + )?; deleted += 1; } diff --git a/crates/storage-ops/src/rollback/components/ledger_db.rs b/crates/storage-ops/src/rollback/components/ledger_db.rs deleted file mode 100644 index d3bff6cb70..0000000000 --- a/crates/storage-ops/src/rollback/components/ledger_db.rs +++ /dev/null @@ -1,4 +0,0 @@ -use std::sync::Arc; - -/// Rollback native DB -pub(crate) fn rollback_ledger_db(_ledger_db: Arc, _down_to_block: u64) {} diff --git a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs new file mode 100644 index 0000000000..2b246f3451 --- /dev/null +++ b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs @@ -0,0 +1,63 @@ +use std::sync::Arc; + +use slots::rollback_slots; +use soft_confirmations::rollback_soft_confirmations; +use tracing::debug; + +use crate::log_result_or_error; +use crate::pruning::types::StorageNodeType; + +mod slots; +mod soft_confirmations; + +/// Rollback native DB +pub(crate) fn rollback_ledger_db( + node_type: StorageNodeType, + ledger_db: Arc, + down_to_block: u64, +) { + debug!("Rolling back Ledger, down to L2 block {}", down_to_block); + + match node_type { + StorageNodeType::Sequencer => { + log_result_or_error!( + "soft_confirmations", + rollback_soft_confirmations(node_type, &ledger_db, down_to_block) + ); + log_result_or_error!( + "slots", + rollback_slots(node_type, &ledger_db, down_to_block) + ); + } + StorageNodeType::FullNode => { + log_result_or_error!( + "soft_confirmations", + rollback_soft_confirmations(node_type, &ledger_db, down_to_block) + ); + log_result_or_error!( + "slots", + rollback_slots(node_type, &ledger_db, down_to_block) + ); + } + StorageNodeType::BatchProver => { + log_result_or_error!( + "soft_confirmations", + rollback_soft_confirmations(node_type, &ledger_db, down_to_block) + 
); + log_result_or_error!( + "slots", + rollback_slots(node_type, &ledger_db, down_to_block) + ); + } + StorageNodeType::LightClient => { + log_result_or_error!( + "soft_confirmations", + rollback_soft_confirmations(node_type, &ledger_db, down_to_block) + ); + log_result_or_error!( + "slots", + rollback_slots(node_type, &ledger_db, down_to_block) + ); + } + } +} diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs new file mode 100644 index 0000000000..a381ec36fa --- /dev/null +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -0,0 +1,36 @@ +use sov_db::schema::tables::L2RangeByL1Height; +use sov_db::schema::types::SoftConfirmationNumber; +use sov_schema_db::{ScanDirection, DB}; + +use crate::pruning::types::StorageNodeType; +use crate::utils::delete_slots_by_number; + +pub(crate) fn rollback_slots( + node_type: StorageNodeType, + ledger_db: &DB, + down_to_block: u64, +) -> anyhow::Result { + let mut slots_to_l2_range = ledger_db + .iter_with_direction::(Default::default(), ScanDirection::Backward)?; + slots_to_l2_range.seek_to_last(); + + let mut deleted = 0; + for record in slots_to_l2_range { + let Ok(record) = record else { + continue; + }; + + let slot_height = record.key; + let slot_range = record.value; + + if slot_range.0 < SoftConfirmationNumber(down_to_block) { + break; + } + + delete_slots_by_number(node_type, ledger_db, slot_height)?; + + deleted += 1; + } + + Ok(deleted) +} diff --git a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs new file mode 100644 index 0000000000..0a01be0d47 --- /dev/null +++ b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs @@ -0,0 +1,42 @@ +use sov_db::schema::tables::SoftConfirmationByNumber; +use sov_db::schema::types::SoftConfirmationNumber; +use sov_schema_db::{ScanDirection, DB}; + +use 
crate::pruning::types::StorageNodeType; +use crate::utils::delete_soft_confirmations_by_number; + +pub(crate) fn rollback_soft_confirmations( + node_type: StorageNodeType, + ledger_db: &DB, + down_to_block: u64, +) -> anyhow::Result { + let mut soft_confirmations = ledger_db.iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + soft_confirmations.seek_to_last(); + + let mut deleted = 0; + for record in soft_confirmations { + let Ok(record) = record else { + continue; + }; + + let soft_confirmation_number = record.key; + + if soft_confirmation_number < SoftConfirmationNumber(down_to_block) { + break; + } + + delete_soft_confirmations_by_number( + node_type, + ledger_db, + soft_confirmation_number, + record.value.hash, + )?; + + deleted += 1; + } + + Ok(deleted) +} diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index b25e806d0e..0048607440 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -4,6 +4,8 @@ use components::{rollback_ledger_db, rollback_native_db, rollback_state_db}; use futures::future; use tracing::info; +use crate::pruning::types::StorageNodeType; + mod components; pub mod service; @@ -31,7 +33,12 @@ impl Rollback { } /// Rollback the provided number of blocks - pub async fn execute(&self, current_l2_height: u64, num_blocks: u64) -> anyhow::Result<()> { + pub async fn execute( + &self, + node_type: StorageNodeType, + current_l2_height: u64, + num_blocks: u64, + ) -> anyhow::Result<()> { info!("Rolling back by {} blocks", num_blocks); let ledger_db = self.ledger_db.clone(); @@ -40,8 +47,9 @@ impl Rollback { let down_to_block = current_l2_height - num_blocks + 1; - let ledger_rollback_handle = - tokio::task::spawn_blocking(move || rollback_ledger_db(ledger_db, down_to_block)); + let ledger_rollback_handle = tokio::task::spawn_blocking(move || { + rollback_ledger_db(node_type, ledger_db, down_to_block) + }); let state_db_rollback_handle 
= tokio::task::spawn_blocking(move || rollback_state_db(state_db, down_to_block)); diff --git a/crates/storage-ops/src/rollback/service.rs b/crates/storage-ops/src/rollback/service.rs index ba757d7534..1cf131e753 100644 --- a/crates/storage-ops/src/rollback/service.rs +++ b/crates/storage-ops/src/rollback/service.rs @@ -4,6 +4,7 @@ use tokio_util::sync::CancellationToken; use tracing::info; use super::Rollback; +use crate::pruning::types::StorageNodeType; pub struct RollbackService { rollback: Rollback, @@ -16,7 +17,7 @@ impl RollbackService { } /// Run service to rollback when instructed to - pub async fn run(mut self, cancellation_token: CancellationToken) { + pub async fn run(mut self, node_type: StorageNodeType, cancellation_token: CancellationToken) { loop { select! { biased; @@ -25,7 +26,7 @@ impl RollbackService { }, Some((current_l2_height, num_blocks)) = self.receiver.recv() => { info!("Received signal to rollback {num_blocks} blocks"); - if let Err(e) = self.rollback.execute(current_l2_height, num_blocks).await { + if let Err(e) = self.rollback.execute(node_type, current_l2_height, num_blocks).await { panic!("Could not rollback blocks: {:?}", e); } } diff --git a/crates/storage-ops/src/utils.rs b/crates/storage-ops/src/utils.rs new file mode 100644 index 0000000000..c8270d20fd --- /dev/null +++ b/crates/storage-ops/src/utils.rs @@ -0,0 +1,78 @@ +use sov_db::schema::tables::{ + CommitmentsByNumber, L2RangeByL1Height, L2Witness, LightClientProofBySlotNumber, + ProofsBySlotNumber, ProofsBySlotNumberV2, ProverStateDiffs, SlotByHash, SoftConfirmationByHash, + SoftConfirmationByNumber, SoftConfirmationStatus, VerifiedBatchProofsBySlotNumber, +}; +use sov_db::schema::types::{DbHash, SlotNumber, SoftConfirmationNumber}; +use sov_schema_db::{ScanDirection, DB}; + +use crate::pruning::types::StorageNodeType; + +pub(crate) fn delete_soft_confirmations_by_number( + node_type: StorageNodeType, + ledger_db: &DB, + soft_confirmation_number: SoftConfirmationNumber, + 
soft_confirmation_hash: DbHash, +) -> anyhow::Result<()> { + ledger_db.delete::(&soft_confirmation_number)?; + + if matches!(node_type, StorageNodeType::LightClient) { + return Ok(()); + } + + ledger_db.delete::(&soft_confirmation_hash)?; + ledger_db.delete::(&soft_confirmation_number)?; + + if matches!(node_type, StorageNodeType::BatchProver) { + ledger_db.delete::(&soft_confirmation_number)?; + ledger_db.delete::(&soft_confirmation_number)?; + } + + Ok(()) +} + +pub(crate) fn delete_slots_by_number( + node_type: StorageNodeType, + ledger_db: &DB, + slot_number: SlotNumber, +) -> anyhow::Result<()> { + ledger_db.delete::(&slot_number)?; + ledger_db.delete::(&slot_number)?; + + if !matches!(node_type, StorageNodeType::Sequencer) { + delete_slot_by_hash(ledger_db, slot_number)?; + } + + if matches!(node_type, StorageNodeType::FullNode) { + ledger_db.delete::(&slot_number)?; + } + + if matches!(node_type, StorageNodeType::BatchProver) { + ledger_db.delete::(&slot_number)?; + ledger_db.delete::(&slot_number)?; + } + + if matches!(node_type, StorageNodeType::LightClient) { + ledger_db.delete::(&slot_number)?; + } + + Ok(()) +} + +fn delete_slot_by_hash(ledger_db: &DB, slot_number: SlotNumber) -> anyhow::Result<()> { + let mut slots = + ledger_db.iter_with_direction::(Default::default(), ScanDirection::Forward)?; + slots.seek_to_first(); + + for record in slots { + let Ok(record) = record else { + continue; + }; + + if record.value < slot_number { + ledger_db.delete::(&record.key)?; + } + } + + Ok(()) +} From 293c4fd09da0704e44340a755777fe24c522c7fd Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 14:59:34 +0300 Subject: [PATCH 08/45] Pass node type to rollback --- bin/cli/src/commands/mod.rs | 42 +++++++++++++++++++++++++++++--- bin/cli/src/commands/prune.rs | 37 ++-------------------------- bin/cli/src/commands/rollback.rs | 15 +++++++++--- bin/cli/src/main.rs | 10 ++++++-- 4 files changed, 61 insertions(+), 43 deletions(-) diff --git 
a/bin/cli/src/commands/mod.rs b/bin/cli/src/commands/mod.rs index 58e864a9b8..7142ae85ba 100644 --- a/bin/cli/src/commands/mod.rs +++ b/bin/cli/src/commands/mod.rs @@ -1,7 +1,43 @@ +pub(crate) use backup::*; +use citrea_storage_ops::pruning::types::StorageNodeType; +use clap::ValueEnum; +pub(crate) use prune::*; +pub(crate) use rollback::*; +use sov_db::schema::tables::{ + BATCH_PROVER_LEDGER_TABLES, FULL_NODE_LEDGER_TABLES, LIGHT_CLIENT_PROVER_LEDGER_TABLES, + SEQUENCER_LEDGER_TABLES, +}; + mod backup; mod prune; mod rollback; -pub(crate) use backup::*; -pub(crate) use prune::*; -pub(crate) use rollback::*; +#[derive(Copy, Clone, ValueEnum)] +pub enum StorageNodeTypeArg { + Sequencer, + FullNode, + BatchProver, + LightClient, +} + +impl From for StorageNodeType { + fn from(value: StorageNodeTypeArg) -> Self { + match value { + StorageNodeTypeArg::Sequencer => StorageNodeType::Sequencer, + StorageNodeTypeArg::FullNode => StorageNodeType::FullNode, + StorageNodeTypeArg::BatchProver => StorageNodeType::BatchProver, + StorageNodeTypeArg::LightClient => StorageNodeType::LightClient, + } + } +} + +pub(crate) fn cfs_from_node_type(node_type: StorageNodeTypeArg) -> Vec { + let cfs = match node_type { + StorageNodeTypeArg::Sequencer => SEQUENCER_LEDGER_TABLES, + StorageNodeTypeArg::FullNode => FULL_NODE_LEDGER_TABLES, + StorageNodeTypeArg::BatchProver => BATCH_PROVER_LEDGER_TABLES, + StorageNodeTypeArg::LightClient => LIGHT_CLIENT_PROVER_LEDGER_TABLES, + }; + + cfs.iter().map(|x| x.to_string()).collect::>() +} diff --git a/bin/cli/src/commands/prune.rs b/bin/cli/src/commands/prune.rs index e539c00eb2..b459ed390c 100644 --- a/bin/cli/src/commands/prune.rs +++ b/bin/cli/src/commands/prune.rs @@ -1,37 +1,15 @@ use std::path::PathBuf; use std::sync::Arc; -use citrea_storage_ops::pruning::types::StorageNodeType; use citrea_storage_ops::pruning::{Pruner, PruningConfig}; -use clap::ValueEnum; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; use sov_db::native_db::NativeDB; use 
sov_db::rocks_db_config::RocksdbConfig; -use sov_db::schema::tables::{ - BATCH_PROVER_LEDGER_TABLES, FULL_NODE_LEDGER_TABLES, LIGHT_CLIENT_PROVER_LEDGER_TABLES, - SEQUENCER_LEDGER_TABLES, -}; use sov_db::state_db::StateDB; use tracing::{debug, info}; -#[derive(Copy, Clone, ValueEnum)] -pub enum StorageNodeTypeArg { - Sequencer, - FullNode, - BatchProver, - LightClient, -} - -impl From for StorageNodeType { - fn from(value: StorageNodeTypeArg) -> Self { - match value { - StorageNodeTypeArg::Sequencer => StorageNodeType::Sequencer, - StorageNodeTypeArg::FullNode => StorageNodeType::FullNode, - StorageNodeTypeArg::BatchProver => StorageNodeType::BatchProver, - StorageNodeTypeArg::LightClient => StorageNodeType::LightClient, - } - } -} +use super::StorageNodeTypeArg; +use crate::commands::cfs_from_node_type; pub(crate) async fn prune( node_type: StorageNodeTypeArg, @@ -75,14 +53,3 @@ pub(crate) async fn prune( } Ok(()) } - -fn cfs_from_node_type(node_type: StorageNodeTypeArg) -> Vec { - let cfs = match node_type { - StorageNodeTypeArg::Sequencer => SEQUENCER_LEDGER_TABLES, - StorageNodeTypeArg::FullNode => FULL_NODE_LEDGER_TABLES, - StorageNodeTypeArg::BatchProver => BATCH_PROVER_LEDGER_TABLES, - StorageNodeTypeArg::LightClient => LIGHT_CLIENT_PROVER_LEDGER_TABLES, - }; - - cfs.iter().map(|x| x.to_string()).collect::>() -} diff --git a/bin/cli/src/commands/rollback.rs b/bin/cli/src/commands/rollback.rs index 2096ad1021..93e5beabf4 100644 --- a/bin/cli/src/commands/rollback.rs +++ b/bin/cli/src/commands/rollback.rs @@ -8,14 +8,23 @@ use sov_db::rocks_db_config::RocksdbConfig; use sov_db::state_db::StateDB; use tracing::info; -pub(crate) async fn rollback(db_path: PathBuf, num_blocks: u64) -> anyhow::Result<()> { +use super::StorageNodeTypeArg; +use crate::commands::cfs_from_node_type; + +pub(crate) async fn rollback( + node_type: StorageNodeTypeArg, + db_path: PathBuf, + num_blocks: u64, +) -> anyhow::Result<()> { info!( "Rolling back DB at {} {} down", 
db_path.display(), num_blocks ); - let rocksdb_config = RocksdbConfig::new(&db_path, None, None); + let column_families = cfs_from_node_type(node_type); + + let rocksdb_config = RocksdbConfig::new(&db_path, None, Some(column_families.to_vec())); let ledger_db = LedgerDB::with_config(&rocksdb_config)?; let native_db = NativeDB::setup_schema_db(&rocksdb_config)?; let state_db = StateDB::setup_schema_db(&rocksdb_config)?; @@ -26,7 +35,7 @@ pub(crate) async fn rollback(db_path: PathBuf, num_blocks: u64) -> anyhow::Resul let rollback = Rollback::new(ledger_db.inner(), Arc::new(state_db), Arc::new(native_db)); rollback - .execute(soft_confirmation_number, num_blocks) + .execute(node_type.into(), soft_confirmation_number, num_blocks) .await?; Ok(()) diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index 1c1b950ff8..884f520f5a 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -51,6 +51,8 @@ enum Commands { }, /// Rollback the most recent N blocks Rollback { + #[arg(long)] + node_type: StorageNodeTypeArg, /// The path of the database to prune #[arg(long)] db_path: PathBuf, @@ -88,8 +90,12 @@ async fn main() -> anyhow::Result<()> { } => { commands::prune(node_type, db_path.clone(), distance).await?; } - Commands::Rollback { db_path, blocks } => { - commands::rollback(db_path.clone(), blocks).await?; + Commands::Rollback { + node_type, + db_path, + blocks, + } => { + commands::rollback(node_type, db_path.clone(), blocks).await?; } Commands::Backup { db_path, From ced439587ad785e2b55c712b614a150174771f1c Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 18 Feb 2025 15:09:09 +0300 Subject: [PATCH 09/45] Calculate down to version value --- crates/storage-ops/src/rollback/components/native_db.rs | 4 +++- crates/storage-ops/src/rollback/components/state_db.rs | 4 ++-- crates/storage-ops/src/rollback/mod.rs | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/storage-ops/src/rollback/components/native_db.rs 
b/crates/storage-ops/src/rollback/components/native_db.rs index 70f15e319c..05f8bad27e 100644 --- a/crates/storage-ops/src/rollback/components/native_db.rs +++ b/crates/storage-ops/src/rollback/components/native_db.rs @@ -7,6 +7,8 @@ use tracing::{debug, error}; pub(crate) fn rollback_native_db(native_db: Arc, down_to_block: u64) { debug!("Rolling back native DB, down to L2 block {}", down_to_block); + let target_version = down_to_block + 1; + let Ok(mut iter) = native_db.iter::() else { return; }; @@ -18,7 +20,7 @@ pub(crate) fn rollback_native_db(native_db: Arc, down_to_bloc while let Some(Ok(entry)) = iter.next() { let version = entry.key.1; // The version value is always ahead of block number by one. - if version >= down_to_block + 1 { + if version >= target_version { keys_to_delete.push(entry.key); counter += 1; } diff --git a/crates/storage-ops/src/rollback/components/state_db.rs b/crates/storage-ops/src/rollback/components/state_db.rs index 4cd061afea..74e34c5d07 100644 --- a/crates/storage-ops/src/rollback/components/state_db.rs +++ b/crates/storage-ops/src/rollback/components/state_db.rs @@ -9,7 +9,7 @@ use tracing::{error, info}; pub(crate) fn rollback_state_db(state_db: Arc, down_to_block: u64) { info!("Rolling back state DB, down to L2 block {}", down_to_block); - let to_version = down_to_block + 1; + let target_version = down_to_block + 1; let mut indices = state_db .iter::() @@ -29,7 +29,7 @@ pub(crate) fn rollback_state_db(state_db: Arc, down_to_block: let node = index.value; // Exit loop if we go down below the target block - if node_key.version() < to_version { + if node_key.version() < target_version { break; } diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index 0048607440..fce8800fa0 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -45,7 +45,7 @@ impl Rollback { let native_db = self.native_db.clone(); let state_db = self.state_db.clone(); - let 
down_to_block = current_l2_height - num_blocks + 1; + let down_to_block = (current_l2_height + 1) - num_blocks; let ledger_rollback_handle = tokio::task::spawn_blocking(move || { rollback_ledger_db(node_type, ledger_db, down_to_block) From 06d3638a7f932267f4c0830a7fed3272899ed438 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 19 Feb 2025 14:11:35 +0300 Subject: [PATCH 10/45] WIP test --- bin/citrea/tests/mock/mod.rs | 1 + bin/citrea/tests/mock/rollback.rs | 114 ++++++++++++++++++++++++++++++ 2 files changed, 115 insertions(+) create mode 100644 bin/citrea/tests/mock/rollback.rs diff --git a/bin/citrea/tests/mock/mod.rs b/bin/citrea/tests/mock/mod.rs index df44a7020f..1bf533e874 100644 --- a/bin/citrea/tests/mock/mod.rs +++ b/bin/citrea/tests/mock/mod.rs @@ -31,6 +31,7 @@ mod mempool; mod proving; mod pruning; mod reopen; +mod rollback; mod sequencer_behaviour; mod sequencer_replacement; mod soft_confirmation_rule_enforcer; diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs new file mode 100644 index 0000000000..3be8b2ed34 --- /dev/null +++ b/bin/citrea/tests/mock/rollback.rs @@ -0,0 +1,114 @@ +use std::collections::BTreeMap; +use std::panic::AssertUnwindSafe; +use std::str::FromStr; + +use alloy_primitives::Address; +use futures::FutureExt; +use reth_primitives::BlockNumberOrTag; +use sov_mock_da::{MockAddress, MockDaService}; + +use crate::common::helpers::{tempdir_with_children, wait_for_l1_block, wait_for_l2_block}; +use crate::mock::{initialize_test, TestConfig}; + +/// Trigger pruning native DB data. 
+#[tokio::test(flavor = "multi_thread")] +async fn test_native_db_rollback() -> Result<(), anyhow::Error> { + citrea::initialize_logging(tracing::Level::DEBUG); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); + + // start rollup on da block 3 + for _ in 0..3 { + da_service.publish_test_block().await.unwrap(); + } + wait_for_l1_block(&da_service, 3, None).await; + + let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = + initialize_test(TestConfig { + da_path: da_db_dir, + sequencer_path: sequencer_db_dir, + fullnode_path: fullnode_db_dir, + pruning_config: None, + ..Default::default() + }) + .await; + + let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); + let mut transactions = BTreeMap::new(); + let mut block_hashes = BTreeMap::new(); + + for i in 1..=50 { + // send one ether to some address + let pending = seq_test_client + .send_eth(addr, None, None, None, 1e18 as u128) + .await + .unwrap(); + + seq_test_client.spam_publish_batch_request().await.unwrap(); + + let tx_hash = pending.tx_hash(); + transactions.insert(i, *tx_hash); + + if i % 5 == 0 { + wait_for_l2_block(&seq_test_client, i, None).await; + + // Get the hash of the latest block + let block_hash = seq_test_client + .eth_get_block_by_number(Some(BlockNumberOrTag::Number(i))) + .await + .header + .hash; + block_hashes.insert(i, block_hash); + + da_service.publish_test_block().await.unwrap(); + + wait_for_l1_block(&da_service, 3 + (i / 5), None).await; + } + } + + seq_test_client.send_publish_batch_request().await; + wait_for_l2_block(&full_node_test_client, 51, None).await; + + // #################################### + // 
ROUND 1: FAIL + // ################################### + // This request is requesting data which has been pruned. + let check_block_by_number_result = AssertUnwindSafe( + full_node_test_client + .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(1))), + ) + .catch_unwind() + .await; + assert!(check_block_by_number_result.is_err()); + + let get_block_receipts_result = AssertUnwindSafe(full_node_test_client.eth_get_block_receipts( + reth_primitives::BlockId::Number(BlockNumberOrTag::Number(1)), + )) + .catch_unwind() + .await; + assert!(get_block_receipts_result.is_err()); + + let get_block_by_hash_result = AssertUnwindSafe( + full_node_test_client.eth_get_block_by_hash(*block_hashes.get(&5).unwrap()), + ) + .catch_unwind() + .await; + assert!(get_block_by_hash_result.is_err()); + + let check_transaction_by_hash_result = AssertUnwindSafe( + full_node_test_client.eth_get_transaction_by_hash(*transactions.get(&1).unwrap(), None), + ) + .catch_unwind() + .await; + assert!(check_transaction_by_hash_result.unwrap().is_none()); + + seq_task.abort(); + full_node_task.abort(); + + Ok(()) +} From 4baf2e96a099276d2d98d21eff2a4496c5b059f6 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 19 Feb 2025 15:51:20 +0300 Subject: [PATCH 11/45] Fix merge breakage --- crates/storage-ops/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage-ops/src/tests.rs b/crates/storage-ops/src/tests.rs index d9db8b797e..0ff36f165a 100644 --- a/crates/storage-ops/src/tests.rs +++ b/crates/storage-ops/src/tests.rs @@ -47,7 +47,7 @@ async fn test_pruning_simple_run() { ); let pruner_service = PrunerService::new(pruner, 0, receiver); - tokio::spawn(pruner_service.run(PruningNodeType::Sequencer, cancellation_token.clone())); + tokio::spawn(pruner_service.run(StorageNodeType::Sequencer, cancellation_token.clone())); sleep(Duration::from_secs(1)); From ef20e2fecb995111581aea297219cd3f20856a8b Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: 
Wed, 19 Feb 2025 15:51:28 +0300 Subject: [PATCH 12/45] Complete test --- bin/citrea/tests/mock/rollback.rs | 153 ++++++++++++++++++------------ 1 file changed, 93 insertions(+), 60 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 3be8b2ed34..d3b0f07730 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -1,24 +1,35 @@ -use std::collections::BTreeMap; -use std::panic::AssertUnwindSafe; +use std::fs; use std::str::FromStr; +use std::sync::Arc; use alloy_primitives::Address; -use futures::FutureExt; +use citrea_common::SequencerConfig; +use citrea_stf::genesis_config::GenesisPaths; +use citrea_storage_ops::pruning::types::StorageNodeType; +use citrea_storage_ops::rollback::Rollback; use reth_primitives::BlockNumberOrTag; +use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; +use sov_db::native_db::NativeDB; +use sov_db::rocks_db_config::RocksdbConfig; +use sov_db::schema::tables::SEQUENCER_LEDGER_TABLES; +use sov_db::state_db::StateDB; use sov_mock_da::{MockAddress, MockDaService}; -use crate::common::helpers::{tempdir_with_children, wait_for_l1_block, wait_for_l2_block}; -use crate::mock::{initialize_test, TestConfig}; +use crate::common::helpers::{ + create_default_rollup_config, start_rollup, tempdir_with_children, wait_for_l1_block, + wait_for_l2_block, NodeMode, +}; +use crate::common::TEST_DATA_GENESIS_PATH; +use crate::mock::evm::init_test_rollup; -/// Trigger pruning native DB data. +/// Trigger rollback native DB data. 
#[tokio::test(flavor = "multi_thread")] -async fn test_native_db_rollback() -> Result<(), anyhow::Error> { +async fn test_rollback() -> Result<(), anyhow::Error> { citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); - let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); @@ -28,42 +39,51 @@ async fn test_native_db_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; - let (seq_test_client, full_node_test_client, seq_task, full_node_task, _) = - initialize_test(TestConfig { - da_path: da_db_dir, - sequencer_path: sequencer_db_dir, - fullnode_path: fullnode_db_dir, - pruning_config: None, - ..Default::default() - }) - .await; + let sequencer_config = SequencerConfig::default(); + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &sequencer_db_dir, + &da_db_dir, + NodeMode::SequencerNode, + None, + ); + + let sequencer_config1 = sequencer_config.clone(); + let task_manager = start_rollup( + seq_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + Some(sequencer_config1), + None, + ) + .await; + + let seq_port = seq_port_rx.await.unwrap(); + let seq_test_client = init_test_rollup(seq_port).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); - let mut transactions = BTreeMap::new(); - let mut block_hashes = BTreeMap::new(); for i in 1..=50 { // send one ether to some address - let pending = seq_test_client + let _ = seq_test_client .send_eth(addr, None, None, None, 1e18 as u128) .await .unwrap(); seq_test_client.spam_publish_batch_request().await.unwrap(); - let tx_hash 
= pending.tx_hash(); - transactions.insert(i, *tx_hash); - if i % 5 == 0 { wait_for_l2_block(&seq_test_client, i, None).await; // Get the hash of the latest block - let block_hash = seq_test_client + seq_test_client .eth_get_block_by_number(Some(BlockNumberOrTag::Number(i))) .await .header .hash; - block_hashes.insert(i, block_hash); da_service.publish_test_block().await.unwrap(); @@ -71,44 +91,57 @@ async fn test_native_db_rollback() -> Result<(), anyhow::Error> { } } - seq_test_client.send_publish_batch_request().await; - wait_for_l2_block(&full_node_test_client, 51, None).await; - - // #################################### - // ROUND 1: FAIL - // ################################### - // This request is requesting data which has been pruned. - let check_block_by_number_result = AssertUnwindSafe( - full_node_test_client - .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(1))), - ) - .catch_unwind() - .await; - assert!(check_block_by_number_result.is_err()); - - let get_block_receipts_result = AssertUnwindSafe(full_node_test_client.eth_get_block_receipts( - reth_primitives::BlockId::Number(BlockNumberOrTag::Number(1)), - )) - .catch_unwind() - .await; - assert!(get_block_receipts_result.is_err()); - - let get_block_by_hash_result = AssertUnwindSafe( - full_node_test_client.eth_get_block_by_hash(*block_hashes.get(&5).unwrap()), - ) - .catch_unwind() - .await; - assert!(get_block_by_hash_result.is_err()); + task_manager.abort().await; + + fs::remove_file(format!("{}/ledger/LOCK", sequencer_db_dir.display())).unwrap(); + + let sequencer_tables = SEQUENCER_LEDGER_TABLES + .iter() + .map(|x| x.to_string()) + .collect::>(); + let rocksdb_config = + RocksdbConfig::new(&sequencer_db_dir, None, Some(sequencer_tables.to_vec())); + let ledger_db = LedgerDB::with_config(&rocksdb_config)?; + let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); + let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); + let rollback = 
Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); + + rollback + .execute(StorageNodeType::Sequencer, 50, 10) + .await + .unwrap(); + + drop(rollback); + drop(state_db); + drop(native_db); + drop(ledger_db); + + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &sequencer_db_dir, + &da_db_dir, + NodeMode::SequencerNode, + None, + ); + let seq_task = tokio::spawn(async move { + start_rollup( + seq_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + Some(sequencer_config), + None, + ) + .await; + }); + let seq_port = seq_port_rx.await.unwrap(); + let seq_test_client = init_test_rollup(seq_port).await; - let check_transaction_by_hash_result = AssertUnwindSafe( - full_node_test_client.eth_get_transaction_by_hash(*transactions.get(&1).unwrap(), None), - ) - .catch_unwind() - .await; - assert!(check_transaction_by_hash_result.unwrap().is_none()); + wait_for_l2_block(&seq_test_client, 40, None).await; seq_task.abort(); - full_node_task.abort(); Ok(()) } From f0a8e4c5bf4f3d1c60d034c071c14abc20a8e112 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Sat, 22 Feb 2025 17:10:12 +0300 Subject: [PATCH 13/45] Rollback to target L1 / L2 combo --- bin/cli/src/commands/rollback.rs | 15 +++++--- bin/cli/src/main.rs | 12 ++++--- .../src/rollback/components/ledger_db/mod.rs | 36 ++++++++----------- .../rollback/components/ledger_db/slots.rs | 13 ++++--- crates/storage-ops/src/rollback/mod.rs | 15 ++++---- crates/storage-ops/src/rollback/service.rs | 10 +++--- 6 files changed, 54 insertions(+), 47 deletions(-) diff --git a/bin/cli/src/commands/rollback.rs b/bin/cli/src/commands/rollback.rs index 93e5beabf4..6e80c21d0b 100644 --- a/bin/cli/src/commands/rollback.rs +++ b/bin/cli/src/commands/rollback.rs @@ -14,12 +14,14 @@ use crate::commands::cfs_from_node_type; pub(crate) async fn rollback( node_type: StorageNodeTypeArg, db_path: PathBuf, - 
num_blocks: u64, + l2_target: u64, + l1_target: u64, ) -> anyhow::Result<()> { info!( - "Rolling back DB at {} {} down", + "Rolling back DB at {} down to L2 {}, L1 {}", db_path.display(), - num_blocks + l2_target, + l1_target, ); let column_families = cfs_from_node_type(node_type); @@ -35,7 +37,12 @@ pub(crate) async fn rollback( let rollback = Rollback::new(ledger_db.inner(), Arc::new(state_db), Arc::new(native_db)); rollback - .execute(node_type.into(), soft_confirmation_number, num_blocks) + .execute( + node_type.into(), + soft_confirmation_number, + l2_target, + l1_target, + ) .await?; Ok(()) diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index 884f520f5a..a3c8f9ccc5 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -56,9 +56,12 @@ enum Commands { /// The path of the database to prune #[arg(long)] db_path: PathBuf, - /// The number of blocks to rollback + /// The target L2 block number to rollback to (non-inclusive) #[arg(long)] - blocks: u64, + l2_target: u64, + /// The target L1 block number to rollback to (non-inclusive) + #[arg(long)] + l1_target: u64, }, /// Backup DBs Backup { @@ -93,9 +96,10 @@ async fn main() -> anyhow::Result<()> { Commands::Rollback { node_type, db_path, - blocks, + l2_target, + l1_target, } => { - commands::rollback(node_type, db_path.clone(), blocks).await?; + commands::rollback(node_type, db_path.clone(), l2_target, l1_target).await?; } Commands::Backup { db_path, diff --git a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs index 2b246f3451..cc43d3d5c8 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs @@ -14,50 +14,42 @@ mod soft_confirmations; pub(crate) fn rollback_ledger_db( node_type: StorageNodeType, ledger_db: Arc, - down_to_block: u64, + target_l2: u64, + target_l1: u64, ) { - debug!("Rolling back Ledger, down to L2 block {}", down_to_block); + 
debug!( + "Rolling back Ledger, down to L2 block {}, L1 block {}", + target_l2, target_l1 + ); match node_type { StorageNodeType::Sequencer => { log_result_or_error!( "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, down_to_block) - ); - log_result_or_error!( - "slots", - rollback_slots(node_type, &ledger_db, down_to_block) + rollback_soft_confirmations(node_type, &ledger_db, target_l2) ); + log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); } StorageNodeType::FullNode => { log_result_or_error!( "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, down_to_block) - ); - log_result_or_error!( - "slots", - rollback_slots(node_type, &ledger_db, down_to_block) + rollback_soft_confirmations(node_type, &ledger_db, target_l2) ); + log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); } StorageNodeType::BatchProver => { log_result_or_error!( "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, down_to_block) - ); - log_result_or_error!( - "slots", - rollback_slots(node_type, &ledger_db, down_to_block) + rollback_soft_confirmations(node_type, &ledger_db, target_l2) ); + log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); } StorageNodeType::LightClient => { log_result_or_error!( "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, down_to_block) - ); - log_result_or_error!( - "slots", - rollback_slots(node_type, &ledger_db, down_to_block) + rollback_soft_confirmations(node_type, &ledger_db, target_l2) ); + log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); } } } diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index a381ec36fa..c4ef3e8d2c 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ 
-1,5 +1,5 @@ -use sov_db::schema::tables::L2RangeByL1Height; -use sov_db::schema::types::SoftConfirmationNumber; +use sov_db::schema::tables::{L2RangeByL1Height, LastSequencerCommitmentSent}; +use sov_db::schema::types::{SlotNumber, SoftConfirmationNumber}; use sov_schema_db::{ScanDirection, DB}; use crate::pruning::types::StorageNodeType; @@ -8,7 +8,7 @@ use crate::utils::delete_slots_by_number; pub(crate) fn rollback_slots( node_type: StorageNodeType, ledger_db: &DB, - down_to_block: u64, + target_l1: u64, ) -> anyhow::Result { let mut slots_to_l2_range = ledger_db .iter_with_direction::(Default::default(), ScanDirection::Backward)?; @@ -23,10 +23,15 @@ pub(crate) fn rollback_slots( let slot_height = record.key; let slot_range = record.value; - if slot_range.0 < SoftConfirmationNumber(down_to_block) { + if slot_height <= SlotNumber(target_l1) { break; } + ledger_db.put::( + &(), + &SoftConfirmationNumber(slot_range.0 .0 - 1), + )?; + delete_slots_by_number(node_type, ledger_db, slot_height)?; deleted += 1; diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index fce8800fa0..4ff30ce29c 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -36,26 +36,25 @@ impl Rollback { pub async fn execute( &self, node_type: StorageNodeType, - current_l2_height: u64, - num_blocks: u64, + _current_l2_height: u64, + l2_target: u64, + l1_target: u64, ) -> anyhow::Result<()> { - info!("Rolling back by {} blocks", num_blocks); + info!("Rolling back until L2 {}, L1 {}", l2_target, l1_target); let ledger_db = self.ledger_db.clone(); let native_db = self.native_db.clone(); let state_db = self.state_db.clone(); - let down_to_block = (current_l2_height + 1) - num_blocks; - let ledger_rollback_handle = tokio::task::spawn_blocking(move || { - rollback_ledger_db(node_type, ledger_db, down_to_block) + rollback_ledger_db(node_type, ledger_db, l2_target, l1_target) }); let state_db_rollback_handle = - 
tokio::task::spawn_blocking(move || rollback_state_db(state_db, down_to_block)); + tokio::task::spawn_blocking(move || rollback_state_db(state_db, l2_target)); let native_db_rollback_handle = - tokio::task::spawn_blocking(move || rollback_native_db(native_db, down_to_block)); + tokio::task::spawn_blocking(move || rollback_native_db(native_db, l2_target)); future::join_all([ ledger_rollback_handle, diff --git a/crates/storage-ops/src/rollback/service.rs b/crates/storage-ops/src/rollback/service.rs index 1cf131e753..7f343d0162 100644 --- a/crates/storage-ops/src/rollback/service.rs +++ b/crates/storage-ops/src/rollback/service.rs @@ -8,11 +8,11 @@ use crate::pruning::types::StorageNodeType; pub struct RollbackService { rollback: Rollback, - receiver: Receiver<(u64, u64)>, + receiver: Receiver<(u64, u64, u64)>, } impl RollbackService { - pub fn new(rollback: Rollback, receiver: Receiver<(u64, u64)>) -> Self { + pub fn new(rollback: Rollback, receiver: Receiver<(u64, u64, u64)>) -> Self { Self { rollback, receiver } } @@ -24,9 +24,9 @@ impl RollbackService { _ = cancellation_token.cancelled() => { return; }, - Some((current_l2_height, num_blocks)) = self.receiver.recv() => { - info!("Received signal to rollback {num_blocks} blocks"); - if let Err(e) = self.rollback.execute(node_type, current_l2_height, num_blocks).await { + Some((current_l2_height, target_l2, target_l1)) = self.receiver.recv() => { + info!("Received signal to rollback to L2 {target_l2}, L1 {target_l1}"); + if let Err(e) = self.rollback.execute(node_type, current_l2_height, target_l2, target_l1).await { panic!("Could not rollback blocks: {:?}", e); } } From fa6cec81955ffaabadd543df2d5e7ae1d62b0101 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Sat, 22 Feb 2025 17:42:26 +0300 Subject: [PATCH 14/45] Fix test --- bin/citrea/tests/mock/rollback.rs | 21 +++++++++++++++---- .../ledger_db/soft_confirmations.rs | 2 +- .../src/rollback/components/native_db.rs | 5 ++++- 
.../src/rollback/components/state_db.rs | 6 +++--- 4 files changed, 25 insertions(+), 9 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index d3b0f07730..4b3860388a 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -19,7 +19,7 @@ use crate::common::helpers::{ create_default_rollup_config, start_rollup, tempdir_with_children, wait_for_l1_block, wait_for_l2_block, NodeMode, }; -use crate::common::TEST_DATA_GENESIS_PATH; +use crate::common::{make_test_client, TEST_DATA_GENESIS_PATH}; use crate::mock::evm::init_test_rollup; /// Trigger rollback native DB data. @@ -39,7 +39,10 @@ async fn test_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; - let sequencer_config = SequencerConfig::default(); + let sequencer_config = SequencerConfig { + min_soft_confirmations_per_commitment: 10, + ..Default::default() + }; let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let rollup_config = create_default_rollup_config( true, @@ -106,8 +109,18 @@ async fn test_rollback() -> Result<(), anyhow::Error> { let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); + // rollback 10 L2 blocks + let rollback_to_l2 = 40; + // We have 13 L1 blocks by now and we want to rollback + // the last 2. 
+ let rollback_to_l1 = 11; rollback - .execute(StorageNodeType::Sequencer, 50, 10) + .execute( + StorageNodeType::Sequencer, + 50, + rollback_to_l2, + rollback_to_l1, + ) .await .unwrap(); @@ -137,7 +150,7 @@ async fn test_rollback() -> Result<(), anyhow::Error> { .await; }); let seq_port = seq_port_rx.await.unwrap(); - let seq_test_client = init_test_rollup(seq_port).await; + let seq_test_client = make_test_client(seq_port).await.unwrap(); wait_for_l2_block(&seq_test_client, 40, None).await; diff --git a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs index 0a01be0d47..ff71daf194 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs @@ -24,7 +24,7 @@ pub(crate) fn rollback_soft_confirmations( let soft_confirmation_number = record.key; - if soft_confirmation_number < SoftConfirmationNumber(down_to_block) { + if soft_confirmation_number <= SoftConfirmationNumber(down_to_block) { break; } diff --git a/crates/storage-ops/src/rollback/components/native_db.rs b/crates/storage-ops/src/rollback/components/native_db.rs index 05f8bad27e..41a530aaf2 100644 --- a/crates/storage-ops/src/rollback/components/native_db.rs +++ b/crates/storage-ops/src/rollback/components/native_db.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use sov_db::schema::tables::ModuleAccessoryState; +use sov_schema_db::ScanDirection; use tracing::{debug, error}; /// Rollback native DB @@ -9,7 +10,9 @@ pub(crate) fn rollback_native_db(native_db: Arc, down_to_bloc let target_version = down_to_block + 1; - let Ok(mut iter) = native_db.iter::() else { + let Ok(mut iter) = native_db + .iter_with_direction::(Default::default(), ScanDirection::Backward) + else { return; }; diff --git a/crates/storage-ops/src/rollback/components/state_db.rs b/crates/storage-ops/src/rollback/components/state_db.rs index 
74e34c5d07..5b2b2b86ad 100644 --- a/crates/storage-ops/src/rollback/components/state_db.rs +++ b/crates/storage-ops/src/rollback/components/state_db.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use jmt::storage::Node; use sov_db::schema::tables::{JmtNodes, JmtValues, KeyHashToKey}; -use sov_schema_db::SchemaBatch; +use sov_schema_db::{ScanDirection, SchemaBatch}; use tracing::{error, info}; /// Rollback state DB @@ -12,7 +12,7 @@ pub(crate) fn rollback_state_db(state_db: Arc, down_to_block: let target_version = down_to_block + 1; let mut indices = state_db - .iter::() + .iter_with_direction::(Default::default(), ScanDirection::Backward) .expect("Tried to rollback state DB but could not obtain an iterator"); indices.seek_to_last(); @@ -29,7 +29,7 @@ pub(crate) fn rollback_state_db(state_db: Arc, down_to_block: let node = index.value; // Exit loop if we go down below the target block - if node_key.version() < target_version { + if node_key.version() <= target_version { break; } From 52be3bb94c617a3b55665aa51cbf93443d12f6ce Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Sat, 22 Feb 2025 17:46:38 +0300 Subject: [PATCH 15/45] Add TODO --- crates/storage-ops/src/rollback/components/ledger_db/slots.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index c4ef3e8d2c..b4858aff32 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -27,6 +27,10 @@ pub(crate) fn rollback_slots( break; } + // TODO: Figure out a way to set it to an actual + // commitment range L2 end. + // `CommitmentsByNumber` table is only populated by + // the batch prover. 
ledger_db.put::( &(), &SoftConfirmationNumber(slot_range.0 .0 - 1), From c07d48b10408d722a8ebd8ce9c4ff8ffd40bf77f Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Sat, 22 Feb 2025 17:46:43 +0300 Subject: [PATCH 16/45] Check rollback in every DB type --- bin/citrea/tests/mock/rollback.rs | 32 +++++++++++++++++-- .../src/rollback/components/native_db.rs | 2 +- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 4b3860388a..fee5945b89 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -1,13 +1,15 @@ use std::fs; +use std::panic::AssertUnwindSafe; use std::str::FromStr; use std::sync::Arc; -use alloy_primitives::Address; +use alloy_primitives::{Address, U256}; use citrea_common::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use citrea_storage_ops::pruning::types::StorageNodeType; use citrea_storage_ops::rollback::Rollback; -use reth_primitives::BlockNumberOrTag; +use futures::FutureExt; +use reth_primitives::{BlockId, BlockNumberOrTag}; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; use sov_db::native_db::NativeDB; use sov_db::rocks_db_config::RocksdbConfig; @@ -24,7 +26,7 @@ use crate::mock::evm::init_test_rollup; /// Trigger rollback native DB data. #[tokio::test(flavor = "multi_thread")] -async fn test_rollback() -> Result<(), anyhow::Error> { +async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); @@ -152,8 +154,32 @@ async fn test_rollback() -> Result<(), anyhow::Error> { let seq_port = seq_port_rx.await.unwrap(); let seq_test_client = make_test_client(seq_port).await.unwrap(); + // Check soft confirmations have been rolled back in Ledger DB wait_for_l2_block(&seq_test_client, 40, None).await; + // Check state DB is rolled back. 
+ let get_balance_result = seq_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Latest))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(40000000000000000000u128) + ); + + // Check native DB is rolled back + let check_block_by_number_result = AssertUnwindSafe( + seq_test_client.eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(41))), + ) + .catch_unwind() + .await; + assert!(check_block_by_number_result.is_err()); + + // Should NOT panic as the data we're requesting here is correct + seq_test_client + .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(40))) + .await; + seq_task.abort(); Ok(()) diff --git a/crates/storage-ops/src/rollback/components/native_db.rs b/crates/storage-ops/src/rollback/components/native_db.rs index 41a530aaf2..a1fb372d60 100644 --- a/crates/storage-ops/src/rollback/components/native_db.rs +++ b/crates/storage-ops/src/rollback/components/native_db.rs @@ -23,7 +23,7 @@ pub(crate) fn rollback_native_db(native_db: Arc, down_to_bloc while let Some(Ok(entry)) = iter.next() { let version = entry.key.1; // The version value is always ahead of block number by one. 
- if version >= target_version { + if version > target_version { keys_to_delete.push(entry.key); counter += 1; } From 3d7dd216ff5f3c520283d7e9746787870e286008 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Sat, 22 Feb 2025 18:58:28 +0300 Subject: [PATCH 17/45] Add test for fullnode --- bin/citrea/tests/mock/rollback.rs | 251 +++++++++++++++++++++++------- 1 file changed, 198 insertions(+), 53 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index fee5945b89..d0df09bbf0 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -1,4 +1,3 @@ -use std::fs; use std::panic::AssertUnwindSafe; use std::str::FromStr; use std::sync::Arc; @@ -10,13 +9,15 @@ use citrea_storage_ops::pruning::types::StorageNodeType; use citrea_storage_ops::rollback::Rollback; use futures::FutureExt; use reth_primitives::{BlockId, BlockNumberOrTag}; +use sov_db::ledger_db::migrations::copy_db_dir_recursive; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; use sov_db::native_db::NativeDB; use sov_db::rocks_db_config::RocksdbConfig; -use sov_db::schema::tables::SEQUENCER_LEDGER_TABLES; +use sov_db::schema::tables::{FULL_NODE_LEDGER_TABLES, SEQUENCER_LEDGER_TABLES}; use sov_db::state_db::StateDB; use sov_mock_da::{MockAddress, MockDaService}; +use crate::common::client::TestClient; use crate::common::helpers::{ create_default_rollup_config, start_rollup, tempdir_with_children, wait_for_l1_block, wait_for_l2_block, NodeMode, @@ -24,12 +25,60 @@ use crate::common::helpers::{ use crate::common::{make_test_client, TEST_DATA_GENESIS_PATH}; use crate::mock::evm::init_test_rollup; -/// Trigger rollback native DB data. 
+async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: &Address) { + for i in 1..=50 { + // send one ether to some address + let _ = test_client + .send_eth(*addr, None, None, None, 1e18 as u128) + .await + .unwrap(); + + test_client.spam_publish_batch_request().await.unwrap(); + + if i % 5 == 0 { + wait_for_l2_block(test_client, i, None).await; + + da_service.publish_test_block().await.unwrap(); + + wait_for_l1_block(da_service, 3 + (i / 5), None).await; + } + } +} + +async fn assert_dbs(test_client: Box, addr: Address) { + // Check soft confirmations have been rolled back in Ledger DB + wait_for_l2_block(&test_client, 40, None).await; + + // Check state DB is rolled back. + let get_balance_result = test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Latest))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(40000000000000000000u128) + ); + + // Check native DB is rolled back + let check_block_by_number_result = AssertUnwindSafe( + test_client.eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(41))), + ) + .catch_unwind() + .await; + assert!(check_block_by_number_result.is_err()); + + // Should NOT panic as the data we're requesting here is correct + test_client + .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(40))) + .await; +} + +/// Trigger rollback DB data. 
#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { citrea::initialize_logging(tracing::Level::DEBUG); - let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); @@ -54,15 +103,15 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { None, ); - let sequencer_config1 = sequencer_config.clone(); let task_manager = start_rollup( seq_port_tx, GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), None, None, rollup_config, - Some(sequencer_config1), + Some(sequencer_config.clone()), None, + false, ) .await; @@ -71,41 +120,19 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); - for i in 1..=50 { - // send one ether to some address - let _ = seq_test_client - .send_eth(addr, None, None, None, 1e18 as u128) - .await - .unwrap(); - - seq_test_client.spam_publish_batch_request().await.unwrap(); - - if i % 5 == 0 { - wait_for_l2_block(&seq_test_client, i, None).await; - - // Get the hash of the latest block - seq_test_client - .eth_get_block_by_number(Some(BlockNumberOrTag::Number(i))) - .await - .header - .hash; - - da_service.publish_test_block().await.unwrap(); - - wait_for_l1_block(&da_service, 3 + (i / 5), None).await; - } - } + fill_blocks(&seq_test_client, &da_service, &addr).await; task_manager.abort().await; - fs::remove_file(format!("{}/ledger/LOCK", sequencer_db_dir.display())).unwrap(); + let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); + copy_db_dir_recursive(&sequencer_db_dir, &new_sequencer_db_dir).unwrap(); let sequencer_tables = SEQUENCER_LEDGER_TABLES .iter() .map(|x| x.to_string()) .collect::>(); let rocksdb_config = - 
RocksdbConfig::new(&sequencer_db_dir, None, Some(sequencer_tables.to_vec())); + RocksdbConfig::new(&new_sequencer_db_dir, None, Some(sequencer_tables.to_vec())); let ledger_db = LedgerDB::with_config(&rocksdb_config)?; let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); @@ -134,7 +161,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let rollup_config = create_default_rollup_config( true, - &sequencer_db_dir, + &new_sequencer_db_dir, &da_db_dir, NodeMode::SequencerNode, None, @@ -148,39 +175,157 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { rollup_config, Some(sequencer_config), None, + false, ) .await; }); let seq_port = seq_port_rx.await.unwrap(); let seq_test_client = make_test_client(seq_port).await.unwrap(); - // Check soft confirmations have been rolled back in Ledger DB - wait_for_l2_block(&seq_test_client, 40, None).await; + assert_dbs(seq_test_client, addr).await; - // Check state DB is rolled back. - let get_balance_result = seq_test_client - .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Latest))) - .await; - assert!(get_balance_result.is_ok()); - assert_eq!( - get_balance_result.unwrap(), - U256::from(40000000000000000000u128) + seq_task.abort(); + + Ok(()) +} + +/// Trigger rollback DB data. 
+#[tokio::test(flavor = "multi_thread")] +async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { + citrea::initialize_logging(tracing::Level::DEBUG); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); + + // start rollup on da block 3 + for _ in 0..3 { + da_service.publish_test_block().await.unwrap(); + } + wait_for_l1_block(&da_service, 3, None).await; + + let sequencer_config = SequencerConfig { + min_soft_confirmations_per_commitment: 10, + ..Default::default() + }; + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &sequencer_db_dir, + &da_db_dir, + NodeMode::SequencerNode, + None, ); - // Check native DB is rolled back - let check_block_by_number_result = AssertUnwindSafe( - seq_test_client.eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(41))), + let sequencer_config1 = sequencer_config.clone(); + let seq_task_manager = start_rollup( + seq_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + Some(sequencer_config1), + None, + false, ) - .catch_unwind() .await; - assert!(check_block_by_number_result.is_err()); - // Should NOT panic as the data we're requesting here is correct - seq_test_client - .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(40))) - .await; + let seq_port = seq_port_rx.await.unwrap(); + let seq_test_client = init_test_rollup(seq_port).await; - seq_task.abort(); + let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &fullnode_db_dir, + &da_db_dir, + 
NodeMode::FullNode(seq_port), + None, + ); + let full_node_task_manager = start_rollup( + full_node_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + None, + None, + false, + ) + .await; + let full_node_port = full_node_port_rx.await.unwrap(); + let full_node_test_client = init_test_rollup(full_node_port).await; + + let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); + + fill_blocks(&seq_test_client, &da_service, &addr).await; + + wait_for_l2_block(&full_node_test_client, 50, None).await; + + seq_task_manager.abort().await; + full_node_task_manager.abort().await; + + let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); + copy_db_dir_recursive(&fullnode_db_dir, &new_full_node_db_dir).unwrap(); + + let full_node_tables = FULL_NODE_LEDGER_TABLES + .iter() + .map(|x| x.to_string()) + .collect::>(); + let rocksdb_config = + RocksdbConfig::new(&new_full_node_db_dir, None, Some(full_node_tables.to_vec())); + let ledger_db = LedgerDB::with_config(&rocksdb_config)?; + let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); + let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); + let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); + + // rollback 10 L2 blocks + let rollback_to_l2 = 40; + // We have 13 L1 blocks by now and we want to rollback + // the last 2. 
+ let rollback_to_l1 = 11; + rollback + .execute( + StorageNodeType::FullNode, + 50, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + + drop(rollback); + drop(state_db); + drop(native_db); + drop(ledger_db); + + let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &new_full_node_db_dir, + &da_db_dir, + NodeMode::FullNode(seq_port), + None, + ); + let full_node_task_manager = start_rollup( + full_node_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + None, + None, + false, + ) + .await; + let full_node_port = full_node_port_rx.await.unwrap(); + let full_node_test_client = make_test_client(full_node_port).await.unwrap(); + + assert_dbs(full_node_test_client, addr).await; + + full_node_task_manager.abort().await; Ok(()) } From dc97a7e553db3fd7704b85e4a952d52262527ced Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Sat, 22 Feb 2025 19:17:18 +0300 Subject: [PATCH 18/45] Add prover test --- bin/citrea/tests/mock/rollback.rs | 160 +++++++++++++++++++++++++++++- 1 file changed, 158 insertions(+), 2 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index d0df09bbf0..bada368720 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use std::sync::Arc; use alloy_primitives::{Address, U256}; -use citrea_common::SequencerConfig; +use citrea_common::{BatchProverConfig, SequencerConfig}; use citrea_stf::genesis_config::GenesisPaths; use citrea_storage_ops::pruning::types::StorageNodeType; use citrea_storage_ops::rollback::Rollback; @@ -13,7 +13,9 @@ use sov_db::ledger_db::migrations::copy_db_dir_recursive; use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; use sov_db::native_db::NativeDB; use sov_db::rocks_db_config::RocksdbConfig; -use sov_db::schema::tables::{FULL_NODE_LEDGER_TABLES, SEQUENCER_LEDGER_TABLES}; +use 
sov_db::schema::tables::{ + BATCH_PROVER_LEDGER_TABLES, FULL_NODE_LEDGER_TABLES, SEQUENCER_LEDGER_TABLES, +}; use sov_db::state_db::StateDB; use sov_mock_da::{MockAddress, MockDaService}; @@ -329,3 +331,157 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { Ok(()) } + +/// Trigger rollback DB data. +#[tokio::test(flavor = "multi_thread")] +async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { + citrea::initialize_logging(tracing::Level::DEBUG); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "batch-prover"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let batch_prover_db_dir = storage_dir.path().join("batch-prover").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); + + // start rollup on da block 3 + for _ in 0..3 { + da_service.publish_test_block().await.unwrap(); + } + wait_for_l1_block(&da_service, 3, None).await; + + let sequencer_config = SequencerConfig { + min_soft_confirmations_per_commitment: 10, + ..Default::default() + }; + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &sequencer_db_dir, + &da_db_dir, + NodeMode::SequencerNode, + None, + ); + + let sequencer_config1 = sequencer_config.clone(); + let seq_task_manager = start_rollup( + seq_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + Some(sequencer_config1), + None, + false, + ) + .await; + + let seq_port = seq_port_rx.await.unwrap(); + let seq_test_client = init_test_rollup(seq_port).await; + + let (batch_prover_port_tx, batch_prover_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &batch_prover_db_dir, + &da_db_dir, + NodeMode::Prover(seq_port), + None, + ); + let batch_prover_task_manager = start_rollup( + 
batch_prover_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + Some(BatchProverConfig { + proving_mode: citrea_common::ProverGuestRunConfig::Execute, + proof_sampling_number: 0, + enable_recovery: true, + }), + None, + rollup_config, + None, + None, + false, + ) + .await; + + let batch_prover_port = batch_prover_port_rx.await.unwrap(); + let batch_prover_test_client = init_test_rollup(batch_prover_port).await; + + let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); + + fill_blocks(&seq_test_client, &da_service, &addr).await; + + wait_for_l2_block(&batch_prover_test_client, 50, None).await; + + seq_task_manager.abort().await; + batch_prover_task_manager.abort().await; + + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover2").to_path_buf(); + copy_db_dir_recursive(&batch_prover_db_dir, &new_batch_prover_db_dir).unwrap(); + + let batch_prover_tables = BATCH_PROVER_LEDGER_TABLES + .iter() + .map(|x| x.to_string()) + .collect::>(); + let rocksdb_config = RocksdbConfig::new( + &new_batch_prover_db_dir, + None, + Some(batch_prover_tables.to_vec()), + ); + let ledger_db = LedgerDB::with_config(&rocksdb_config)?; + let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); + let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); + let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); + + // rollback 10 L2 blocks + let rollback_to_l2 = 40; + // We have 13 L1 blocks by now and we want to rollback + // the last 2. 
+ let rollback_to_l1 = 11; + rollback + .execute( + StorageNodeType::BatchProver, + 50, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + + drop(rollback); + drop(state_db); + drop(native_db); + drop(ledger_db); + + let (batch_prover_port_tx, batch_prover_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &new_batch_prover_db_dir, + &da_db_dir, + NodeMode::Prover(seq_port), + None, + ); + + let batch_prover_task_manager = start_rollup( + batch_prover_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + Some(BatchProverConfig { + proving_mode: citrea_common::ProverGuestRunConfig::Execute, + proof_sampling_number: 0, + enable_recovery: true, + }), + None, + rollup_config, + None, + None, + false, + ) + .await; + let batch_prover_port = batch_prover_port_rx.await.unwrap(); + let batch_prover_test_client = make_test_client(batch_prover_port).await.unwrap(); + + assert_dbs(batch_prover_test_client, addr).await; + + batch_prover_task_manager.abort().await; + + Ok(()) +} From 1e04de1de2e755603f87cad734cfa9d74722c094 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Mon, 24 Feb 2025 14:58:09 +0300 Subject: [PATCH 19/45] Add sov-schema-db --- Cargo.lock | 1 + bin/citrea/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index c15d761a31..0c084e4e5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1830,6 +1830,7 @@ dependencies = [ "sov-modules-stf-blueprint", "sov-prover-storage-manager", "sov-rollup-interface", + "sov-schema-db", "sov-state", "sp1-helper", "tempfile", diff --git a/bin/citrea/Cargo.toml b/bin/citrea/Cargo.toml index 5f77bcb1da..8559688319 100644 --- a/bin/citrea/Cargo.toml +++ b/bin/citrea/Cargo.toml @@ -74,6 +74,7 @@ citrea-risc0-adapter = { path = "../../crates/risc0", features = ["native", "tes sov-mock-da = { path = "../../crates/sovereign-sdk/adapters/mock-da", default-features = false } sov-prover-storage-manager = { path = 
"../../crates/sovereign-sdk/full-node/sov-prover-storage-manager", features = ["test-utils"] } sov-rollup-interface = { path = "../../crates/sovereign-sdk/rollup-interface", features = ["testing"] } +sov-schema-db = { path = "../../crates/sovereign-sdk/full-node/db/sov-schema-db" } alloy = { workspace = true, features = ["hyper", "consensus", "rpc-types-eth", "provider-http", "signers", "signer-local"] } alloy-rlp = { workspace = true } From 0bb52cd8155380d1bb26a865cde4742513083103 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Mon, 24 Feb 2025 16:45:25 +0300 Subject: [PATCH 20/45] Exclude provers from checking sequencer commitments --- .../rollback/components/ledger_db/slots.rs | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index b4858aff32..fa8e022cd5 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -21,20 +21,24 @@ pub(crate) fn rollback_slots( }; let slot_height = record.key; - let slot_range = record.value; if slot_height <= SlotNumber(target_l1) { break; } - // TODO: Figure out a way to set it to an actual - // commitment range L2 end. - // `CommitmentsByNumber` table is only populated by - // the batch prover. - ledger_db.put::( - &(), - &SoftConfirmationNumber(slot_range.0 .0 - 1), - )?; + if matches!(node_type, StorageNodeType::Sequencer) + || matches!(node_type, StorageNodeType::FullNode) + { + let slot_range = record.value; + // TODO: Figure out a way to set it to an actual + // commitment range L2 end. + // `CommitmentsByNumber` table is only populated by + // the batch prover. 
+ ledger_db.put::( + &(), + &SoftConfirmationNumber(slot_range.0 .0 - 1), + )?; + } delete_slots_by_number(node_type, ledger_db, slot_height)?; From b608d2a7ee3334d27dcdf6d8ff37de60065946c6 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Mon, 24 Feb 2025 16:45:37 +0300 Subject: [PATCH 21/45] Add more asserts in test --- bin/citrea/tests/mock/rollback.rs | 521 +++++++++++++++--------------- 1 file changed, 268 insertions(+), 253 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index bada368720..eea568704f 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -1,8 +1,11 @@ +use std::net::SocketAddr; use std::panic::AssertUnwindSafe; +use std::path::Path; use std::str::FromStr; use std::sync::Arc; use alloy_primitives::{Address, U256}; +use citrea_common::tasks::manager::TaskManager; use citrea_common::{BatchProverConfig, SequencerConfig}; use citrea_stf::genesis_config::GenesisPaths; use citrea_storage_ops::pruning::types::StorageNodeType; @@ -14,8 +17,10 @@ use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; use sov_db::native_db::NativeDB; use sov_db::rocks_db_config::RocksdbConfig; use sov_db::schema::tables::{ - BATCH_PROVER_LEDGER_TABLES, FULL_NODE_LEDGER_TABLES, SEQUENCER_LEDGER_TABLES, + ProofsBySlotNumber, VerifiedBatchProofsBySlotNumber, BATCH_PROVER_LEDGER_TABLES, + FULL_NODE_LEDGER_TABLES, SEQUENCER_LEDGER_TABLES, }; +use sov_db::schema::types::SlotNumber; use sov_db::state_db::StateDB; use sov_mock_da::{MockAddress, MockDaService}; @@ -27,6 +32,135 @@ use crate::common::helpers::{ use crate::common::{make_test_client, TEST_DATA_GENESIS_PATH}; use crate::mock::evm::init_test_rollup; +fn instantiate_dbs( + db_path: &Path, + tables: &[&str], +) -> anyhow::Result<(LedgerDB, Arc, Arc)> { + let tables = tables.iter().map(|x| x.to_string()).collect::>(); + let rocksdb_config = RocksdbConfig::new(&db_path, None, Some(tables.to_vec())); + let ledger_db = 
LedgerDB::with_config(&rocksdb_config)?; + let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); + let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); + + Ok((ledger_db, native_db, state_db)) +} + +async fn start_sequencer( + sequencer_db_dir: &Path, + da_db_dir: &Path, + restart: bool, +) -> (TaskManager<()>, Box, SocketAddr) { + let sequencer_config = SequencerConfig { + min_soft_confirmations_per_commitment: 10, + ..Default::default() + }; + let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &sequencer_db_dir, + &da_db_dir, + NodeMode::SequencerNode, + None, + ); + + let sequencer_config1 = sequencer_config.clone(); + let seq_task_manager = start_rollup( + seq_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + Some(sequencer_config1), + None, + false, + ) + .await; + + let seq_port = seq_port_rx.await.unwrap(); + let seq_test_client = if restart { + make_test_client(seq_port).await.unwrap() + } else { + init_test_rollup(seq_port).await + }; + + (seq_task_manager, seq_test_client, seq_port) +} + +async fn start_full_node( + full_node_db_dir: &Path, + da_db_dir: &Path, + seq_port: SocketAddr, + restart: bool, +) -> (TaskManager<()>, Box) { + let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &full_node_db_dir, + &da_db_dir, + NodeMode::FullNode(seq_port), + None, + ); + let full_node_task_manager = start_rollup( + full_node_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + None, + None, + rollup_config, + None, + None, + false, + ) + .await; + let full_node_port = full_node_port_rx.await.unwrap(); + let full_node_test_client = if restart { + make_test_client(full_node_port).await.unwrap() + } else { + init_test_rollup(full_node_port).await + }; + + (full_node_task_manager, full_node_test_client) +} + +async fn 
start_batch_prover( + batch_prover_db_dir: &Path, + da_db_dir: &Path, + seq_port: SocketAddr, + restart: bool, +) -> (TaskManager<()>, Box) { + let (batch_prover_port_tx, batch_prover_port_rx) = tokio::sync::oneshot::channel(); + let rollup_config = create_default_rollup_config( + true, + &batch_prover_db_dir, + &da_db_dir, + NodeMode::Prover(seq_port), + None, + ); + let batch_prover_task_manager = start_rollup( + batch_prover_port_tx, + GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), + Some(BatchProverConfig { + proving_mode: citrea_common::ProverGuestRunConfig::Execute, + proof_sampling_number: 0, + enable_recovery: true, + }), + None, + rollup_config, + None, + None, + false, + ) + .await; + + let batch_prover_port = batch_prover_port_rx.await.unwrap(); + let batch_prover_test_client = if restart { + make_test_client(batch_prover_port).await.unwrap() + } else { + init_test_rollup(batch_prover_port).await + }; + + (batch_prover_task_manager, batch_prover_test_client) +} + async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: &Address) { for i in 1..=50 { // send one ether to some address @@ -47,23 +181,24 @@ async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: } } -async fn assert_dbs(test_client: Box, addr: Address) { +async fn assert_dbs(test_client: Box, addr: Address, at_block: u64, balance: u128) { // Check soft confirmations have been rolled back in Ledger DB - wait_for_l2_block(&test_client, 40, None).await; + wait_for_l2_block(&test_client, at_block, None).await; // Check state DB is rolled back. 
let get_balance_result = test_client - .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Latest))) + .eth_get_balance( + addr, + Some(BlockId::Number(BlockNumberOrTag::Number(at_block))), + ) .await; assert!(get_balance_result.is_ok()); - assert_eq!( - get_balance_result.unwrap(), - U256::from(40000000000000000000u128) - ); + assert_eq!(get_balance_result.unwrap(), U256::from(balance)); // Check native DB is rolled back let check_block_by_number_result = AssertUnwindSafe( - test_client.eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(41))), + test_client + .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(at_block + 1))), ) .catch_unwind() .await; @@ -71,14 +206,14 @@ async fn assert_dbs(test_client: Box, addr: Address) { // Should NOT panic as the data we're requesting here is correct test_client - .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(40))) + .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(at_block))) .await; } /// Trigger rollback DB data. 
#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -92,52 +227,20 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; - let sequencer_config = SequencerConfig { - min_soft_confirmations_per_commitment: 10, - ..Default::default() - }; - let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &sequencer_db_dir, - &da_db_dir, - NodeMode::SequencerNode, - None, - ); - - let task_manager = start_rollup( - seq_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - None, - None, - rollup_config, - Some(sequencer_config.clone()), - None, - false, - ) - .await; - - let seq_port = seq_port_rx.await.unwrap(); - let seq_test_client = init_test_rollup(seq_port).await; + let (seq_task_manager, seq_test_client, _seq_port) = + start_sequencer(&sequencer_db_dir, &da_db_dir, false).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); fill_blocks(&seq_test_client, &da_service, &addr).await; - task_manager.abort().await; + seq_task_manager.abort().await; let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); copy_db_dir_recursive(&sequencer_db_dir, &new_sequencer_db_dir).unwrap(); - let sequencer_tables = SEQUENCER_LEDGER_TABLES - .iter() - .map(|x| x.to_string()) - .collect::>(); - let rocksdb_config = - RocksdbConfig::new(&new_sequencer_db_dir, None, Some(sequencer_tables.to_vec())); - let ledger_db = LedgerDB::with_config(&rocksdb_config)?; - let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); - let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); + let 
(ledger_db, native_db, state_db) = + instantiate_dbs(&sequencer_db_dir, SEQUENCER_LEDGER_TABLES).unwrap(); let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); // rollback 10 L2 blocks @@ -160,33 +263,12 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { drop(native_db); drop(ledger_db); - let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &new_sequencer_db_dir, - &da_db_dir, - NodeMode::SequencerNode, - None, - ); - let seq_task = tokio::spawn(async move { - start_rollup( - seq_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - None, - None, - rollup_config, - Some(sequencer_config), - None, - false, - ) - .await; - }); - let seq_port = seq_port_rx.await.unwrap(); - let seq_test_client = make_test_client(seq_port).await.unwrap(); + let (seq_task_manager, seq_test_client, _) = + start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; - assert_dbs(seq_test_client, addr).await; + assert_dbs(seq_test_client, addr, 40, 40000000000000000000).await; - seq_task.abort(); + seq_task_manager.abort().await; Ok(()) } @@ -199,7 +281,7 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); - let fullnode_db_dir = storage_dir.path().join("full-node").to_path_buf(); + let full_node_db_dir = storage_dir.path().join("full-node").to_path_buf(); let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); @@ -209,56 +291,11 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; - let sequencer_config = SequencerConfig { - min_soft_confirmations_per_commitment: 10, - ..Default::default() - }; - let (seq_port_tx, seq_port_rx) = 
tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &sequencer_db_dir, - &da_db_dir, - NodeMode::SequencerNode, - None, - ); - - let sequencer_config1 = sequencer_config.clone(); - let seq_task_manager = start_rollup( - seq_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - None, - None, - rollup_config, - Some(sequencer_config1), - None, - false, - ) - .await; + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&sequencer_db_dir, &da_db_dir, false).await; - let seq_port = seq_port_rx.await.unwrap(); - let seq_test_client = init_test_rollup(seq_port).await; - - let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &fullnode_db_dir, - &da_db_dir, - NodeMode::FullNode(seq_port), - None, - ); - let full_node_task_manager = start_rollup( - full_node_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - None, - None, - rollup_config, - None, - None, - false, - ) - .await; - let full_node_port = full_node_port_rx.await.unwrap(); - let full_node_test_client = init_test_rollup(full_node_port).await; + let (full_node_task_manager, full_node_test_client) = + start_full_node(&full_node_db_dir, &da_db_dir, seq_port, false).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); @@ -270,17 +307,10 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { full_node_task_manager.abort().await; let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); - copy_db_dir_recursive(&fullnode_db_dir, &new_full_node_db_dir).unwrap(); - - let full_node_tables = FULL_NODE_LEDGER_TABLES - .iter() - .map(|x| x.to_string()) - .collect::>(); - let rocksdb_config = - RocksdbConfig::new(&new_full_node_db_dir, None, Some(full_node_tables.to_vec())); - let ledger_db = LedgerDB::with_config(&rocksdb_config)?; - let native_db = 
Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); - let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); + copy_db_dir_recursive(&full_node_db_dir, &new_full_node_db_dir).unwrap(); + + let (ledger_db, native_db, state_db) = + instantiate_dbs(&new_full_node_db_dir, FULL_NODE_LEDGER_TABLES).unwrap(); let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); // rollback 10 L2 blocks @@ -303,29 +333,10 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { drop(native_db); drop(ledger_db); - let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &new_full_node_db_dir, - &da_db_dir, - NodeMode::FullNode(seq_port), - None, - ); - let full_node_task_manager = start_rollup( - full_node_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - None, - None, - rollup_config, - None, - None, - false, - ) - .await; - let full_node_port = full_node_port_rx.await.unwrap(); - let full_node_test_client = make_test_client(full_node_port).await.unwrap(); + let (full_node_task_manager, full_node_test_client) = + start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; - assert_dbs(full_node_test_client, addr).await; + assert_dbs(full_node_test_client, addr, 40, 40000000000000000000).await; full_node_task_manager.abort().await; @@ -337,9 +348,10 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { citrea::initialize_logging(tracing::Level::DEBUG); - let storage_dir = tempdir_with_children(&["DA", "sequencer", "batch-prover"]); + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node", "batch-prover"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let full_node_db_dir = storage_dir.path().join("full-node").to_path_buf(); let 
batch_prover_db_dir = storage_dir.path().join("batch-prover").to_path_buf(); let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); @@ -350,93 +362,86 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; - let sequencer_config = SequencerConfig { - min_soft_confirmations_per_commitment: 10, - ..Default::default() - }; - let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &sequencer_db_dir, - &da_db_dir, - NodeMode::SequencerNode, - None, - ); + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&sequencer_db_dir, &da_db_dir, false).await; - let sequencer_config1 = sequencer_config.clone(); - let seq_task_manager = start_rollup( - seq_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - None, - None, - rollup_config, - Some(sequencer_config1), - None, - false, - ) - .await; + let (full_node_task_manager, full_node_test_client) = + start_full_node(&full_node_db_dir, &da_db_dir, seq_port, false).await; - let seq_port = seq_port_rx.await.unwrap(); - let seq_test_client = init_test_rollup(seq_port).await; - - let (batch_prover_port_tx, batch_prover_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &batch_prover_db_dir, - &da_db_dir, - NodeMode::Prover(seq_port), - None, - ); - let batch_prover_task_manager = start_rollup( - batch_prover_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - Some(BatchProverConfig { - proving_mode: citrea_common::ProverGuestRunConfig::Execute, - proof_sampling_number: 0, - enable_recovery: true, - }), - None, - rollup_config, - None, - None, - false, - ) - .await; - - let batch_prover_port = batch_prover_port_rx.await.unwrap(); - let batch_prover_test_client = init_test_rollup(batch_prover_port).await; + let (batch_prover_task_manager, batch_prover_test_client) = + 
start_batch_prover(&batch_prover_db_dir, &da_db_dir, seq_port, false).await; let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); fill_blocks(&seq_test_client, &da_service, &addr).await; + wait_for_l2_block(&full_node_test_client, 50, None).await; wait_for_l2_block(&batch_prover_test_client, 50, None).await; + let get_balance_result = full_node_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(50000000000000000000u128) + ); + + let get_balance_result = batch_prover_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(50000000000000000000u128) + ); + seq_task_manager.abort().await; + full_node_task_manager.abort().await; batch_prover_task_manager.abort().await; + let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); + copy_db_dir_recursive(&full_node_db_dir, &new_full_node_db_dir).unwrap(); + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover2").to_path_buf(); copy_db_dir_recursive(&batch_prover_db_dir, &new_batch_prover_db_dir).unwrap(); - let batch_prover_tables = BATCH_PROVER_LEDGER_TABLES - .iter() - .map(|x| x.to_string()) - .collect::>(); - let rocksdb_config = RocksdbConfig::new( - &new_batch_prover_db_dir, - None, - Some(batch_prover_tables.to_vec()), - ); - let ledger_db = LedgerDB::with_config(&rocksdb_config)?; - let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); - let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); - let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); + // At block 22, full node SHOULD have a verified proof + let (ledger_db, native_db, state_db) = + instantiate_dbs(&new_full_node_db_dir, 
FULL_NODE_LEDGER_TABLES).unwrap(); + let ledger_db = ledger_db.inner(); + assert!(ledger_db + .get::(&SlotNumber(10)) + .unwrap() + .is_some()); + assert!(ledger_db + .get::(&SlotNumber(22)) + .unwrap() + .is_some()); - // rollback 10 L2 blocks let rollback_to_l2 = 40; - // We have 13 L1 blocks by now and we want to rollback - // the last 2. let rollback_to_l1 = 11; + + // Rollback full node + let rollback = Rollback::new(ledger_db, state_db.clone(), native_db.clone()); + rollback + .execute( + StorageNodeType::FullNode, + 50, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + + drop(rollback); + drop(state_db); + drop(native_db); + + // Rollback batch prover + let (ledger_db, native_db, state_db) = + instantiate_dbs(&new_batch_prover_db_dir, BATCH_PROVER_LEDGER_TABLES).unwrap(); + let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); rollback .execute( StorageNodeType::BatchProver, @@ -452,36 +457,46 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { drop(native_db); drop(ledger_db); - let (batch_prover_port_tx, batch_prover_port_rx) = tokio::sync::oneshot::channel(); - let rollup_config = create_default_rollup_config( - true, - &new_batch_prover_db_dir, - &da_db_dir, - NodeMode::Prover(seq_port), - None, - ); + let (batch_prover_task_manager, batch_prover_test_client) = + start_batch_prover(&new_batch_prover_db_dir, &da_db_dir, seq_port, true).await; - let batch_prover_task_manager = start_rollup( - batch_prover_port_tx, - GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), - Some(BatchProverConfig { - proving_mode: citrea_common::ProverGuestRunConfig::Execute, - proof_sampling_number: 0, - enable_recovery: true, - }), - None, - rollup_config, - None, - None, - false, - ) - .await; - let batch_prover_port = batch_prover_port_rx.await.unwrap(); - let batch_prover_test_client = make_test_client(batch_prover_port).await.unwrap(); - - assert_dbs(batch_prover_test_client, addr).await; + 
assert_dbs(batch_prover_test_client, addr, 40, 40000000000000000000).await; batch_prover_task_manager.abort().await; + let old_full_node_db_dir = new_full_node_db_dir; + let new_full_node_db_dir = storage_dir.path().join("full-node3").to_path_buf(); + copy_db_dir_recursive(&old_full_node_db_dir, &new_full_node_db_dir).unwrap(); + + let old_batch_prover_db_dir = new_batch_prover_db_dir; + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover3").to_path_buf(); + copy_db_dir_recursive(&old_batch_prover_db_dir, &new_batch_prover_db_dir).unwrap(); + + // At block 22, verified proof in full node should have been pruned. + let (fn_ledger_db, _, _) = + instantiate_dbs(&new_full_node_db_dir, FULL_NODE_LEDGER_TABLES).unwrap(); + let fn_ledger_db = fn_ledger_db.inner(); + assert!(fn_ledger_db + .get::(&SlotNumber(10)) + .unwrap() + .is_some()); + assert!(fn_ledger_db + .get::(&SlotNumber(22)) + .unwrap() + .is_none()); + + // At block 22, verified proof in full node should have been pruned. 
+ let (bp_ledger_db, _, _) = + instantiate_dbs(&new_batch_prover_db_dir, BATCH_PROVER_LEDGER_TABLES).unwrap(); + let bp_ledger_db = bp_ledger_db.inner(); + assert!(bp_ledger_db + .get::(&SlotNumber(10)) + .unwrap() + .is_some()); + assert!(bp_ledger_db + .get::(&SlotNumber(22)) + .unwrap() + .is_none()); + Ok(()) } From eeab583d67312755b8efca6b9d04d3c6aeccd765 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Mon, 24 Feb 2025 17:22:22 +0300 Subject: [PATCH 22/45] Clippy --- bin/citrea/tests/mock/rollback.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index eea568704f..5206c7ca34 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -37,7 +37,7 @@ fn instantiate_dbs( tables: &[&str], ) -> anyhow::Result<(LedgerDB, Arc, Arc)> { let tables = tables.iter().map(|x| x.to_string()).collect::>(); - let rocksdb_config = RocksdbConfig::new(&db_path, None, Some(tables.to_vec())); + let rocksdb_config = RocksdbConfig::new(db_path, None, Some(tables.to_vec())); let ledger_db = LedgerDB::with_config(&rocksdb_config)?; let native_db = Arc::new(NativeDB::setup_schema_db(&rocksdb_config)?); let state_db = Arc::new(StateDB::setup_schema_db(&rocksdb_config)?); @@ -57,8 +57,8 @@ async fn start_sequencer( let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); let rollup_config = create_default_rollup_config( true, - &sequencer_db_dir, - &da_db_dir, + sequencer_db_dir, + da_db_dir, NodeMode::SequencerNode, None, ); @@ -95,8 +95,8 @@ async fn start_full_node( let (full_node_port_tx, full_node_port_rx) = tokio::sync::oneshot::channel(); let rollup_config = create_default_rollup_config( true, - &full_node_db_dir, - &da_db_dir, + full_node_db_dir, + da_db_dir, NodeMode::FullNode(seq_port), None, ); @@ -130,8 +130,8 @@ async fn start_batch_prover( let (batch_prover_port_tx, batch_prover_port_rx) = tokio::sync::oneshot::channel(); let 
rollup_config = create_default_rollup_config( true, - &batch_prover_db_dir, - &da_db_dir, + batch_prover_db_dir, + da_db_dir, NodeMode::Prover(seq_port), None, ); From 4235353e95b59939f7c9b52d699fa1f29e420feb Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Mon, 24 Feb 2025 17:27:45 +0300 Subject: [PATCH 23/45] Fix sequencer test --- bin/citrea/tests/mock/rollback.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 5206c7ca34..180f3cbaca 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -213,7 +213,7 @@ async fn assert_dbs(test_client: Box, addr: Address, at_block: u64, /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { - // citrea::initialize_logging(tracing::Level::DEBUG); + citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -240,7 +240,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { copy_db_dir_recursive(&sequencer_db_dir, &new_sequencer_db_dir).unwrap(); let (ledger_db, native_db, state_db) = - instantiate_dbs(&sequencer_db_dir, SEQUENCER_LEDGER_TABLES).unwrap(); + instantiate_dbs(&new_sequencer_db_dir, SEQUENCER_LEDGER_TABLES).unwrap(); let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); // rollback 10 L2 blocks From cd90f9894e6f2dae05f1134c48531c4cf485d814 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Mon, 24 Feb 2025 17:30:40 +0300 Subject: [PATCH 24/45] Disable logging --- bin/citrea/tests/mock/rollback.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 180f3cbaca..afa93a44bd 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ 
b/bin/citrea/tests/mock/rollback.rs @@ -213,7 +213,7 @@ async fn assert_dbs(test_client: Box, addr: Address, at_block: u64, /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -276,7 +276,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -346,7 +346,7 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { /// Trigger rollback DB data. 
#[tokio::test(flavor = "multi_thread")] async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node", "batch-prover"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); From 7f8f9c023036a66619b1de77104fb52b2fe4c3a1 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 25 Feb 2025 16:06:39 +0300 Subject: [PATCH 25/45] Resolve test issues --- bin/citrea/tests/mock/rollback.rs | 50 ++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index afa93a44bd..4dce6e6546 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -52,6 +52,7 @@ async fn start_sequencer( ) -> (TaskManager<()>, Box, SocketAddr) { let sequencer_config = SequencerConfig { min_soft_confirmations_per_commitment: 10, + test_mode: true, ..Default::default() }; let (seq_port_tx, seq_port_rx) = tokio::sync::oneshot::channel(); @@ -63,14 +64,13 @@ async fn start_sequencer( None, ); - let sequencer_config1 = sequencer_config.clone(); let seq_task_manager = start_rollup( seq_port_tx, GenesisPaths::from_dir(TEST_DATA_GENESIS_PATH), None, None, rollup_config, - Some(sequencer_config1), + Some(sequencer_config), None, false, ) @@ -171,12 +171,9 @@ async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: test_client.spam_publish_batch_request().await.unwrap(); - if i % 5 == 0 { + if i % 10 == 0 { wait_for_l2_block(test_client, i, None).await; - - da_service.publish_test_block().await.unwrap(); - - wait_for_l1_block(da_service, 3 + (i / 5), None).await; + wait_for_l1_block(da_service, 3 + (i / 10), None).await; } } } @@ -234,6 +231,17 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { fill_blocks(&seq_test_client, &da_service, 
&addr).await; + wait_for_l2_block(&seq_test_client, 50, None).await; + + let get_balance_result = seq_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(50000000000000000000u128) + ); + seq_task_manager.abort().await; let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); @@ -301,8 +309,27 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { fill_blocks(&seq_test_client, &da_service, &addr).await; + wait_for_l2_block(&seq_test_client, 50, None).await; wait_for_l2_block(&full_node_test_client, 50, None).await; + let get_balance_result = seq_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(50000000000000000000u128) + ); + + let get_balance_result = full_node_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(50000000000000000000u128) + ); + seq_task_manager.abort().await; full_node_task_manager.abort().await; @@ -378,6 +405,15 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { wait_for_l2_block(&full_node_test_client, 50, None).await; wait_for_l2_block(&batch_prover_test_client, 50, None).await; + let get_balance_result = seq_test_client + .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) + .await; + assert!(get_balance_result.is_ok()); + assert_eq!( + get_balance_result.unwrap(), + U256::from(50000000000000000000u128) + ); + let get_balance_result = full_node_test_client .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) .await; From a06c7b6335f270b2e8daaf81f9352f15fbfb943b Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: 
Tue, 25 Feb 2025 21:50:12 +0300 Subject: [PATCH 26/45] Prune up to target l1 --- crates/storage-ops/src/rollback/components/ledger_db/slots.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index fa8e022cd5..c10a6752cb 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -22,7 +22,7 @@ pub(crate) fn rollback_slots( let slot_height = record.key; - if slot_height <= SlotNumber(target_l1) { + if slot_height < SlotNumber(target_l1) { break; } From 825dab0ce81ce062078250d2d9816c441837bd84 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 25 Feb 2025 22:21:16 +0300 Subject: [PATCH 27/45] Delete short header proofs --- crates/storage-ops/src/utils.rs | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/crates/storage-ops/src/utils.rs b/crates/storage-ops/src/utils.rs index c8270d20fd..1b4f0f53d1 100644 --- a/crates/storage-ops/src/utils.rs +++ b/crates/storage-ops/src/utils.rs @@ -1,7 +1,8 @@ use sov_db::schema::tables::{ CommitmentsByNumber, L2RangeByL1Height, L2Witness, LightClientProofBySlotNumber, - ProofsBySlotNumber, ProofsBySlotNumberV2, ProverStateDiffs, SlotByHash, SoftConfirmationByHash, - SoftConfirmationByNumber, SoftConfirmationStatus, VerifiedBatchProofsBySlotNumber, + ProofsBySlotNumber, ProofsBySlotNumberV2, ProverStateDiffs, ShortHeaderProofBySlotHash, + SlotByHash, SoftConfirmationByHash, SoftConfirmationByNumber, SoftConfirmationStatus, + VerifiedBatchProofsBySlotNumber, }; use sov_db::schema::types::{DbHash, SlotNumber, SoftConfirmationNumber}; use sov_schema_db::{ScanDirection, DB}; @@ -39,6 +40,9 @@ pub(crate) fn delete_slots_by_number( ledger_db.delete::(&slot_number)?; ledger_db.delete::(&slot_number)?; + if !matches!(node_type, StorageNodeType::LightClient) { + 
delete_short_header_proofs(ledger_db, slot_number)?; + } if !matches!(node_type, StorageNodeType::Sequencer) { delete_slot_by_hash(ledger_db, slot_number)?; } @@ -59,6 +63,26 @@ pub(crate) fn delete_slots_by_number( Ok(()) } +fn delete_short_header_proofs(ledger_db: &DB, slot_number: SlotNumber) -> anyhow::Result<()> { + let mut slots = + ledger_db.iter_with_direction::(Default::default(), ScanDirection::Backward)?; + slots.seek_to_last(); + + for record in slots { + let Ok(record) = record else { + continue; + }; + + // TODO for pruning this should be less than + if record.value > slot_number { + println!("Deleting slot short proof: {:?}", record.key); + ledger_db.delete::(&record.key)?; + } + } + + Ok(()) +} + fn delete_slot_by_hash(ledger_db: &DB, slot_number: SlotNumber) -> anyhow::Result<()> { let mut slots = ledger_db.iter_with_direction::(Default::default(), ScanDirection::Forward)?; From c94fdfba81ea21d37a7d0b0a3b699e0c50238ab4 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 25 Feb 2025 22:21:35 +0300 Subject: [PATCH 28/45] Fix sequencer and full node tests --- bin/citrea/tests/mock/rollback.rs | 266 +++++++++++++++++++++--------- 1 file changed, 185 insertions(+), 81 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 4dce6e6546..e85d07e2f7 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -17,7 +17,7 @@ use sov_db::ledger_db::{LedgerDB, SharedLedgerOps}; use sov_db::native_db::NativeDB; use sov_db::rocks_db_config::RocksdbConfig; use sov_db::schema::tables::{ - ProofsBySlotNumber, VerifiedBatchProofsBySlotNumber, BATCH_PROVER_LEDGER_TABLES, + ProofsBySlotNumberV2, VerifiedBatchProofsBySlotNumber, BATCH_PROVER_LEDGER_TABLES, FULL_NODE_LEDGER_TABLES, SEQUENCER_LEDGER_TABLES, }; use sov_db::schema::types::SlotNumber; @@ -72,7 +72,7 @@ async fn start_sequencer( rollup_config, Some(sequencer_config), None, - false, + restart, ) .await; @@ -161,6 +161,32 @@ async 
fn start_batch_prover( (batch_prover_task_manager, batch_prover_test_client) } +async fn rollback_node( + node_type: StorageNodeType, + tables: &[&str], + old_path: &Path, + new_path: &Path, + rollback_to_l2: u64, + rollback_to_l1: u64, +) -> anyhow::Result<()> { + copy_db_dir_recursive(&old_path, &new_path).unwrap(); + + let (ledger_db, native_db, state_db) = instantiate_dbs(&new_path, tables).unwrap(); + let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); + + rollback + .execute(node_type, 50, rollback_to_l2, rollback_to_l1) + .await + .unwrap(); + + drop(rollback); + drop(state_db); + drop(native_db); + drop(ledger_db); + + Ok(()) +} + async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: &Address) { for i in 1..=50 { // send one ether to some address @@ -178,7 +204,7 @@ async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: } } -async fn assert_dbs(test_client: Box, addr: Address, at_block: u64, balance: u128) { +async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, balance: u128) { // Check soft confirmations have been rolled back in Ledger DB wait_for_l2_block(&test_client, at_block, None).await; @@ -253,9 +279,9 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { // rollback 10 L2 blocks let rollback_to_l2 = 40; - // We have 13 L1 blocks by now and we want to rollback - // the last 2. - let rollback_to_l1 = 11; + // We have 8 L1 blocks by now and we want to rollback + // the last one. 
+ let rollback_to_l1 = 7; rollback .execute( StorageNodeType::Sequencer, @@ -274,7 +300,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { let (seq_task_manager, seq_test_client, _) = start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; - assert_dbs(seq_test_client, addr, 40, 40000000000000000000).await; + assert_dbs(&seq_test_client, addr, 40, 40000000000000000000).await; seq_task_manager.abort().await; @@ -284,7 +310,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { - // citrea::initialize_logging(tracing::Level::DEBUG); + citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -333,38 +359,63 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { seq_task_manager.abort().await; full_node_task_manager.abort().await; - let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); - copy_db_dir_recursive(&full_node_db_dir, &new_full_node_db_dir).unwrap(); - - let (ledger_db, native_db, state_db) = - instantiate_dbs(&new_full_node_db_dir, FULL_NODE_LEDGER_TABLES).unwrap(); - let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); - // rollback 10 L2 blocks let rollback_to_l2 = 40; - // We have 13 L1 blocks by now and we want to rollback - // the last 2. - let rollback_to_l1 = 11; - rollback - .execute( - StorageNodeType::FullNode, - 50, - rollback_to_l2, - rollback_to_l1, - ) - .await - .unwrap(); + // We have 8 L1 blocks by now and we want to rollback + // the last one. 
+ let rollback_to_l1 = 7; - drop(rollback); - drop(state_db); - drop(native_db); - drop(ledger_db); + let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); + rollback_node( + StorageNodeType::Sequencer, + SEQUENCER_LEDGER_TABLES, + &sequencer_db_dir, + &new_sequencer_db_dir, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + + let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); + rollback_node( + StorageNodeType::FullNode, + FULL_NODE_LEDGER_TABLES, + &full_node_db_dir, + &new_full_node_db_dir, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + let new_sequencer_db_dir = storage_dir.path().join("sequencer3").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("sequencer2"), + &new_sequencer_db_dir, + ) + .unwrap(); + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; + + let new_full_node_db_dir = storage_dir.path().join("full-node3").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("full-node2"), + &new_full_node_db_dir, + ) + .unwrap(); let (full_node_task_manager, full_node_test_client) = start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; - assert_dbs(full_node_test_client, addr, 40, 40000000000000000000).await; + assert_dbs(&full_node_test_client, addr, 40, 40000000000000000000).await; + for _ in 0..10 { + seq_test_client.spam_publish_batch_request().await.unwrap(); + } + wait_for_l2_block(&seq_test_client, 50, None).await; + wait_for_l2_block(&full_node_test_client, 50, None).await; + + seq_task_manager.abort().await; full_node_task_manager.abort().await; Ok(()) @@ -373,7 +424,7 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { /// Trigger rollback DB data. 
#[tokio::test(flavor = "multi_thread")] async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { - // citrea::initialize_logging(tracing::Level::DEBUG); + citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node", "batch-prover"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -443,81 +494,134 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { copy_db_dir_recursive(&batch_prover_db_dir, &new_batch_prover_db_dir).unwrap(); // At block 22, full node SHOULD have a verified proof - let (ledger_db, native_db, state_db) = + let (ledger_db, _native_db, _state_db) = instantiate_dbs(&new_full_node_db_dir, FULL_NODE_LEDGER_TABLES).unwrap(); let ledger_db = ledger_db.inner(); assert!(ledger_db - .get::(&SlotNumber(10)) + .get::(&SlotNumber(7)) .unwrap() .is_some()); assert!(ledger_db - .get::(&SlotNumber(22)) + .get::(&SlotNumber(9)) .unwrap() .is_some()); + // rollback 10 L2 blocks let rollback_to_l2 = 40; - let rollback_to_l1 = 11; - - // Rollback full node - let rollback = Rollback::new(ledger_db, state_db.clone(), native_db.clone()); - rollback - .execute( - StorageNodeType::FullNode, - 50, - rollback_to_l2, - rollback_to_l1, - ) - .await - .unwrap(); + // We have 8 L1 blocks by now and we want to rollback + // the last one. 
+ let rollback_to_l1 = 7; - drop(rollback); - drop(state_db); - drop(native_db); + let new_full_node_db_dir = storage_dir.path().join("full-node3").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("full-node2"), + &new_full_node_db_dir, + ) + .unwrap(); + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover3").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("batch-prover2"), + &new_batch_prover_db_dir, + ) + .unwrap(); + + let new_sequencer_db_dir = storage_dir.path().join("sequencer3").to_path_buf(); + rollback_node( + StorageNodeType::Sequencer, + SEQUENCER_LEDGER_TABLES, + &sequencer_db_dir, + &new_sequencer_db_dir, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + + rollback_node( + StorageNodeType::FullNode, + FULL_NODE_LEDGER_TABLES, + &full_node_db_dir, + &new_full_node_db_dir, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); + + rollback_node( + StorageNodeType::BatchProver, + BATCH_PROVER_LEDGER_TABLES, + &batch_prover_db_dir, + &new_batch_prover_db_dir, + rollback_to_l2, + rollback_to_l1, + ) + .await + .unwrap(); - // Rollback batch prover - let (ledger_db, native_db, state_db) = - instantiate_dbs(&new_batch_prover_db_dir, BATCH_PROVER_LEDGER_TABLES).unwrap(); - let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); - rollback - .execute( - StorageNodeType::BatchProver, - 50, - rollback_to_l2, - rollback_to_l1, - ) - .await - .unwrap(); + let new_sequencer_db_dir = storage_dir.path().join("sequencer4").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("sequencer3"), + &new_sequencer_db_dir, + ) + .unwrap(); + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; - drop(rollback); - drop(state_db); - drop(native_db); - drop(ledger_db); + let new_full_node_db_dir = storage_dir.path().join("full-node4").to_path_buf(); + copy_db_dir_recursive( + 
&storage_dir.path().join("full-node3"), + &new_full_node_db_dir, + ) + .unwrap(); + let (full_node_task_manager, full_node_test_client) = + start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover4").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("batch-prover3"), + &new_batch_prover_db_dir, + ) + .unwrap(); let (batch_prover_task_manager, batch_prover_test_client) = start_batch_prover(&new_batch_prover_db_dir, &da_db_dir, seq_port, true).await; - assert_dbs(batch_prover_test_client, addr, 40, 40000000000000000000).await; + assert_dbs(&batch_prover_test_client, addr, 40, 40000000000000000000).await; - batch_prover_task_manager.abort().await; + for _ in 0..10 { + seq_test_client.spam_publish_batch_request().await.unwrap(); + } + wait_for_l2_block(&seq_test_client, 50, None).await; + wait_for_l2_block(&full_node_test_client, 50, None).await; + wait_for_l2_block(&batch_prover_test_client, 50, None).await; - let old_full_node_db_dir = new_full_node_db_dir; - let new_full_node_db_dir = storage_dir.path().join("full-node3").to_path_buf(); - copy_db_dir_recursive(&old_full_node_db_dir, &new_full_node_db_dir).unwrap(); + seq_task_manager.abort().await; + full_node_task_manager.abort().await; + batch_prover_task_manager.abort().await; - let old_batch_prover_db_dir = new_batch_prover_db_dir; - let new_batch_prover_db_dir = storage_dir.path().join("batch-prover3").to_path_buf(); - copy_db_dir_recursive(&old_batch_prover_db_dir, &new_batch_prover_db_dir).unwrap(); + let new_full_node_db_dir = storage_dir.path().join("full-node5").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("full-node4"), + &new_full_node_db_dir, + ) + .unwrap(); + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover5").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("batch-prover4"), + &new_batch_prover_db_dir, + ) + .unwrap(); // At block 22, 
verified proof in full node should have been pruned. let (fn_ledger_db, _, _) = instantiate_dbs(&new_full_node_db_dir, FULL_NODE_LEDGER_TABLES).unwrap(); let fn_ledger_db = fn_ledger_db.inner(); assert!(fn_ledger_db - .get::(&SlotNumber(10)) + .get::(&SlotNumber(7)) .unwrap() .is_some()); assert!(fn_ledger_db - .get::(&SlotNumber(22)) + .get::(&SlotNumber(9)) .unwrap() .is_none()); @@ -526,11 +630,11 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { instantiate_dbs(&new_batch_prover_db_dir, BATCH_PROVER_LEDGER_TABLES).unwrap(); let bp_ledger_db = bp_ledger_db.inner(); assert!(bp_ledger_db - .get::(&SlotNumber(10)) + .get::(&SlotNumber(7)) .unwrap() .is_some()); assert!(bp_ledger_db - .get::(&SlotNumber(22)) + .get::(&SlotNumber(9)) .unwrap() .is_none()); From 251b98830c6da369c01b8f70e104c7857769beea Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 25 Feb 2025 22:33:41 +0300 Subject: [PATCH 29/45] Pass sequencer commitment l2 height --- bin/cli/src/commands/rollback.rs | 2 ++ bin/cli/src/main.rs | 13 ++++++++++++- .../src/rollback/components/ledger_db/mod.rs | 1 + .../src/rollback/components/ledger_db/slots.rs | 7 ++----- crates/storage-ops/src/rollback/mod.rs | 11 +++++++++-- 5 files changed, 26 insertions(+), 8 deletions(-) diff --git a/bin/cli/src/commands/rollback.rs b/bin/cli/src/commands/rollback.rs index 6e80c21d0b..040d2bcd27 100644 --- a/bin/cli/src/commands/rollback.rs +++ b/bin/cli/src/commands/rollback.rs @@ -16,6 +16,7 @@ pub(crate) async fn rollback( db_path: PathBuf, l2_target: u64, l1_target: u64, + last_sequencer_commitment_l2_height: u64, ) -> anyhow::Result<()> { info!( "Rolling back DB at {} down to L2 {}, L1 {}", @@ -42,6 +43,7 @@ pub(crate) async fn rollback( soft_confirmation_number, l2_target, l1_target, + last_sequencer_commitment_l2_height, ) .await?; diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index a3c8f9ccc5..6690ca60ae 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -62,6 +62,9 @@ 
enum Commands { /// The target L1 block number to rollback to (non-inclusive) #[arg(long)] l1_target: u64, + /// The L2 block number at which there was a sequencer commitment that was sent. + #[arg(long)] + sequencer_commitment_l2_height: u64, }, /// Backup DBs Backup { @@ -98,8 +101,16 @@ async fn main() -> anyhow::Result<()> { db_path, l2_target, l1_target, + sequencer_commitment_l2_height, } => { - commands::rollback(node_type, db_path.clone(), l2_target, l1_target).await?; + commands::rollback( + node_type, + db_path.clone(), + l2_target, + l1_target, + sequencer_commitment_l2_height, + ) + .await?; } Commands::Backup { db_path, diff --git a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs index cc43d3d5c8..e261ee7ada 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs @@ -16,6 +16,7 @@ pub(crate) fn rollback_ledger_db( ledger_db: Arc, target_l2: u64, target_l1: u64, + last_sequencer_commitment_l2_height: u64, ) { debug!( "Rolling back Ledger, down to L2 block {}, L1 block {}", diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index c10a6752cb..ec5b1fcd20 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -9,6 +9,7 @@ pub(crate) fn rollback_slots( node_type: StorageNodeType, ledger_db: &DB, target_l1: u64, + last_sequencer_commitment_l2_height: u64, ) -> anyhow::Result { let mut slots_to_l2_range = ledger_db .iter_with_direction::(Default::default(), ScanDirection::Backward)?; @@ -30,13 +31,9 @@ pub(crate) fn rollback_slots( || matches!(node_type, StorageNodeType::FullNode) { let slot_range = record.value; - // TODO: Figure out a way to set it to an actual - // commitment range L2 end. 
- // `CommitmentsByNumber` table is only populated by - // the batch prover. ledger_db.put::( &(), - &SoftConfirmationNumber(slot_range.0 .0 - 1), + &SoftConfirmationNumber(last_sequencer_commitment_l2_height), )?; } diff --git a/crates/storage-ops/src/rollback/mod.rs b/crates/storage-ops/src/rollback/mod.rs index 4ff30ce29c..60190888b6 100644 --- a/crates/storage-ops/src/rollback/mod.rs +++ b/crates/storage-ops/src/rollback/mod.rs @@ -32,13 +32,14 @@ impl Rollback { } } - /// Rollback the provided number of blocks + /// Rollback the provided L2/L1 block combination. pub async fn execute( &self, node_type: StorageNodeType, _current_l2_height: u64, l2_target: u64, l1_target: u64, + last_sequencer_commitment_l2_height: u64, ) -> anyhow::Result<()> { info!("Rolling back until L2 {}, L1 {}", l2_target, l1_target); @@ -47,7 +48,13 @@ impl Rollback { let state_db = self.state_db.clone(); let ledger_rollback_handle = tokio::task::spawn_blocking(move || { - rollback_ledger_db(node_type, ledger_db, l2_target, l1_target) + rollback_ledger_db( + node_type, + ledger_db, + l2_target, + l1_target, + last_sequencer_commitment_l2_height, + ) }); let state_db_rollback_handle = From e9d56256c15d20c8d19672e5af0d34aa18b2453f Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Tue, 25 Feb 2025 22:33:56 +0300 Subject: [PATCH 30/45] Remove repetition --- .../src/rollback/components/ledger_db/mod.rs | 43 ++++++------------- 1 file changed, 13 insertions(+), 30 deletions(-) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs index e261ee7ada..5e60bb7a47 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs @@ -23,34 +23,17 @@ pub(crate) fn rollback_ledger_db( target_l2, target_l1 ); - match node_type { - StorageNodeType::Sequencer => { - log_result_or_error!( - "soft_confirmations", - rollback_soft_confirmations(node_type, 
&ledger_db, target_l2) - ); - log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); - } - StorageNodeType::FullNode => { - log_result_or_error!( - "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, target_l2) - ); - log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); - } - StorageNodeType::BatchProver => { - log_result_or_error!( - "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, target_l2) - ); - log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); - } - StorageNodeType::LightClient => { - log_result_or_error!( - "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, target_l2) - ); - log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1)); - } - } + log_result_or_error!( + "soft_confirmations", + rollback_soft_confirmations(node_type, &ledger_db, target_l2) + ); + log_result_or_error!( + "slots", + rollback_slots( + node_type, + &ledger_db, + target_l1, + last_sequencer_commitment_l2_height, + ) + ); } From 15fa4ffb1f7dbd38bdfb9cf9240fa9c8188484ba Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 26 Feb 2025 14:42:26 +0300 Subject: [PATCH 31/45] Suppress panic output --- bin/citrea/tests/mock/rollback.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index e85d07e2f7..d8b65af1fb 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -1,5 +1,5 @@ use std::net::SocketAddr; -use std::panic::AssertUnwindSafe; +use std::panic::{self, AssertUnwindSafe}; use std::path::Path; use std::str::FromStr; use std::sync::Arc; @@ -208,6 +208,10 @@ async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, bala // Check soft confirmations have been rolled back in Ledger DB wait_for_l2_block(&test_client, at_block, None).await; + // Suppress output of 
panics + let prev_hook = panic::take_hook(); + panic::set_hook(Box::new(|_| {})); + // Check state DB is rolled back. let get_balance_result = test_client .eth_get_balance( @@ -226,6 +230,7 @@ async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, bala .catch_unwind() .await; assert!(check_block_by_number_result.is_err()); + panic::set_hook(prev_hook); // Should NOT panic as the data we're requesting here is correct test_client From 7ef760c2373f2484bbb23fd4467094b65225aa33 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 26 Feb 2025 14:43:19 +0300 Subject: [PATCH 32/45] Pass last commitment l2 height --- bin/citrea/tests/mock/rollback.rs | 13 ++++++++++--- .../src/rollback/components/ledger_db/mod.rs | 9 +++------ .../components/ledger_db/soft_confirmations.rs | 16 +++++++++++++--- crates/storage-ops/src/rollback/service.rs | 17 ++++++++++++----- 4 files changed, 38 insertions(+), 17 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index d8b65af1fb..74a87e65b2 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -166,8 +166,9 @@ async fn rollback_node( tables: &[&str], old_path: &Path, new_path: &Path, - rollback_to_l2: u64, - rollback_to_l1: u64, + rollback_l2_height: u64, + rollback_l1_height: u64, + commitment_l2_height: u64, ) -> anyhow::Result<()> { copy_db_dir_recursive(&old_path, &new_path).unwrap(); @@ -175,7 +176,13 @@ async fn rollback_node( let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); rollback - .execute(node_type, 50, rollback_to_l2, rollback_to_l1) + .execute( + node_type, + 50, + rollback_l2_height, + rollback_l1_height, + commitment_l2_height, + ) .await .unwrap(); diff --git a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs index 5e60bb7a47..762e4f31b5 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs 
+++ b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs @@ -25,15 +25,12 @@ pub(crate) fn rollback_ledger_db( log_result_or_error!( "soft_confirmations", - rollback_soft_confirmations(node_type, &ledger_db, target_l2) - ); - log_result_or_error!( - "slots", - rollback_slots( + rollback_soft_confirmations( node_type, &ledger_db, - target_l1, + target_l2, last_sequencer_commitment_l2_height, ) ); + log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1,)); } diff --git a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs index ff71daf194..72b534f6dd 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs @@ -1,4 +1,4 @@ -use sov_db::schema::tables::SoftConfirmationByNumber; +use sov_db::schema::tables::{LastSequencerCommitmentSent, SoftConfirmationByNumber}; use sov_db::schema::types::SoftConfirmationNumber; use sov_schema_db::{ScanDirection, DB}; @@ -8,7 +8,8 @@ use crate::utils::delete_soft_confirmations_by_number; pub(crate) fn rollback_soft_confirmations( node_type: StorageNodeType, ledger_db: &DB, - down_to_block: u64, + target_l2: u64, + last_sequencer_commitment_l2_height: u64, ) -> anyhow::Result { let mut soft_confirmations = ledger_db.iter_with_direction::( Default::default(), @@ -24,10 +25,19 @@ pub(crate) fn rollback_soft_confirmations( let soft_confirmation_number = record.key; - if soft_confirmation_number <= SoftConfirmationNumber(down_to_block) { + if soft_confirmation_number <= SoftConfirmationNumber(target_l2) { break; } + if matches!(node_type, StorageNodeType::Sequencer) + || matches!(node_type, StorageNodeType::FullNode) + { + ledger_db.put::( + &(), + &SoftConfirmationNumber(last_sequencer_commitment_l2_height), + )?; + } + delete_soft_confirmations_by_number( node_type, ledger_db, diff --git 
a/crates/storage-ops/src/rollback/service.rs b/crates/storage-ops/src/rollback/service.rs index 7f343d0162..1932375a5c 100644 --- a/crates/storage-ops/src/rollback/service.rs +++ b/crates/storage-ops/src/rollback/service.rs @@ -6,13 +6,20 @@ use tracing::info; use super::Rollback; use crate::pruning::types::StorageNodeType; +pub struct RollbackSignal { + current_l2_height: u64, + target_l2: u64, + target_l1: u64, + last_commitment_l2_height: u64, +} + pub struct RollbackService { rollback: Rollback, - receiver: Receiver<(u64, u64, u64)>, + receiver: Receiver, } impl RollbackService { - pub fn new(rollback: Rollback, receiver: Receiver<(u64, u64, u64)>) -> Self { + pub fn new(rollback: Rollback, receiver: Receiver) -> Self { Self { rollback, receiver } } @@ -24,9 +31,9 @@ impl RollbackService { _ = cancellation_token.cancelled() => { return; }, - Some((current_l2_height, target_l2, target_l1)) = self.receiver.recv() => { - info!("Received signal to rollback to L2 {target_l2}, L1 {target_l1}"); - if let Err(e) = self.rollback.execute(node_type, current_l2_height, target_l2, target_l1).await { + Some(signal) = self.receiver.recv() => { + info!("Received signal to rollback to L2 {}, L1 {}", signal.target_l2, signal.target_l1); + if let Err(e) = self.rollback.execute(node_type, signal.current_l2_height, signal.target_l2, signal.target_l1, signal.last_commitment_l2_height).await { panic!("Could not rollback blocks: {:?}", e); } } From f7e991e24eafe301434bee3981f8bfc1b834176d Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 26 Feb 2025 14:43:47 +0300 Subject: [PATCH 33/45] Cleanup --- .../src/rollback/components/ledger_db/slots.rs | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index ec5b1fcd20..2e9c285635 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ 
b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -1,5 +1,5 @@ -use sov_db::schema::tables::{L2RangeByL1Height, LastSequencerCommitmentSent}; -use sov_db::schema::types::{SlotNumber, SoftConfirmationNumber}; +use sov_db::schema::tables::L2RangeByL1Height; +use sov_db::schema::types::SlotNumber; use sov_schema_db::{ScanDirection, DB}; use crate::pruning::types::StorageNodeType; @@ -9,7 +9,6 @@ pub(crate) fn rollback_slots( node_type: StorageNodeType, ledger_db: &DB, target_l1: u64, - last_sequencer_commitment_l2_height: u64, ) -> anyhow::Result { let mut slots_to_l2_range = ledger_db .iter_with_direction::(Default::default(), ScanDirection::Backward)?; @@ -23,20 +22,10 @@ pub(crate) fn rollback_slots( let slot_height = record.key; - if slot_height < SlotNumber(target_l1) { + if slot_height <= SlotNumber(target_l1) { break; } - if matches!(node_type, StorageNodeType::Sequencer) - || matches!(node_type, StorageNodeType::FullNode) - { - let slot_range = record.value; - ledger_db.put::( - &(), - &SoftConfirmationNumber(last_sequencer_commitment_l2_height), - )?; - } - delete_slots_by_number(node_type, ledger_db, slot_height)?; deleted += 1; From 918b7326cf56fdf2958f13884e4a9bf910868a06 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 26 Feb 2025 16:35:59 +0300 Subject: [PATCH 34/45] Make it pass --- bin/citrea/tests/mock/rollback.rs | 229 ++++++++++++++++++------------ 1 file changed, 140 insertions(+), 89 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 74a87e65b2..8827eb7e3f 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -27,7 +27,7 @@ use sov_mock_da::{MockAddress, MockDaService}; use crate::common::client::TestClient; use crate::common::helpers::{ create_default_rollup_config, start_rollup, tempdir_with_children, wait_for_l1_block, - wait_for_l2_block, NodeMode, + wait_for_l2_block, wait_for_proof, NodeMode, }; use 
crate::common::{make_test_client, TEST_DATA_GENESIS_PATH}; use crate::mock::evm::init_test_rollup; @@ -194,7 +194,12 @@ async fn rollback_node( Ok(()) } -async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: &Address) { +async fn fill_blocks( + test_client: &TestClient, + da_service: &MockDaService, + addr: &Address, + fullnode_test_client: Option<&TestClient>, +) { for i in 1..=50 { // send one ether to some address let _ = test_client @@ -207,6 +212,9 @@ async fn fill_blocks(test_client: &TestClient, da_service: &MockDaService, addr: if i % 10 == 0 { wait_for_l2_block(test_client, i, None).await; wait_for_l1_block(da_service, 3 + (i / 10), None).await; + if let Some(fullnode_test_client) = fullnode_test_client { + wait_for_proof(fullnode_test_client, 3 + ((i / 10) * 2), None).await; + } } } } @@ -248,7 +256,7 @@ async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, bala /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { - // citrea::initialize_logging(tracing::Level::DEBUG); + citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -267,7 +275,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); - fill_blocks(&seq_test_client, &da_service, &addr).await; + fill_blocks(&seq_test_client, &da_service, &addr, None).await; wait_for_l2_block(&seq_test_client, 50, None).await; @@ -282,37 +290,28 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { seq_task_manager.abort().await; - let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); - copy_db_dir_recursive(&sequencer_db_dir, &new_sequencer_db_dir).unwrap(); - - let (ledger_db, native_db, state_db) = - 
instantiate_dbs(&new_sequencer_db_dir, SEQUENCER_LEDGER_TABLES).unwrap(); - let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); - // rollback 10 L2 blocks - let rollback_to_l2 = 40; + let rollback_l2_height = 30; // We have 8 L1 blocks by now and we want to rollback // the last one. - let rollback_to_l1 = 7; - rollback - .execute( - StorageNodeType::Sequencer, - 50, - rollback_to_l2, - rollback_to_l1, - ) - .await - .unwrap(); - - drop(rollback); - drop(state_db); - drop(native_db); - drop(ledger_db); + let rollback_l1_height = 6; + let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); + rollback_node( + StorageNodeType::Sequencer, + SEQUENCER_LEDGER_TABLES, + &sequencer_db_dir, + &new_sequencer_db_dir, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, + ) + .await + .unwrap(); let (seq_task_manager, seq_test_client, _) = start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; - assert_dbs(&seq_test_client, addr, 40, 40000000000000000000).await; + assert_dbs(&seq_test_client, addr, 30, 30000000000000000000).await; seq_task_manager.abort().await; @@ -337,6 +336,9 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; + //------------------ + // Start nodes + //------------------ let (seq_task_manager, seq_test_client, seq_port) = start_sequencer(&sequencer_db_dir, &da_db_dir, false).await; @@ -345,11 +347,17 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); - fill_blocks(&seq_test_client, &da_service, &addr).await; + //------------------ + // Fill blocks + //------------------ + fill_blocks(&seq_test_client, &da_service, &addr, None).await; wait_for_l2_block(&seq_test_client, 50, None).await; wait_for_l2_block(&full_node_test_client, 50, None).await; + //------------------ + // Assert data + //------------------ let 
get_balance_result = seq_test_client .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) .await; @@ -371,11 +379,14 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { seq_task_manager.abort().await; full_node_task_manager.abort().await; + //------------------ + // Rollback + //------------------ // rollback 10 L2 blocks - let rollback_to_l2 = 40; + let rollback_l2_height = 30; // We have 8 L1 blocks by now and we want to rollback // the last one. - let rollback_to_l1 = 7; + let rollback_l1_height = 6; let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); rollback_node( @@ -383,24 +394,32 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { SEQUENCER_LEDGER_TABLES, &sequencer_db_dir, &new_sequencer_db_dir, - rollback_to_l2, - rollback_to_l1, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, ) .await .unwrap(); + //------------------ + // Assert state after rollback + //------------------ let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); rollback_node( StorageNodeType::FullNode, FULL_NODE_LEDGER_TABLES, &full_node_db_dir, &new_full_node_db_dir, - rollback_to_l2, - rollback_to_l1, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, ) .await .unwrap(); + //------------------ + // Make sure nodes are able to sync after rollback + //------------------ let new_sequencer_db_dir = storage_dir.path().join("sequencer3").to_path_buf(); copy_db_dir_recursive( &storage_dir.path().join("sequencer2"), @@ -419,13 +438,13 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { let (full_node_task_manager, full_node_test_client) = start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; - assert_dbs(&full_node_test_client, addr, 40, 40000000000000000000).await; + assert_dbs(&full_node_test_client, addr, 30, 30000000000000000000).await; for _ in 0..10 { seq_test_client.spam_publish_batch_request().await.unwrap(); } - 
wait_for_l2_block(&seq_test_client, 50, None).await; - wait_for_l2_block(&full_node_test_client, 50, None).await; + wait_for_l2_block(&seq_test_client, 40, None).await; + wait_for_l2_block(&full_node_test_client, 40, None).await; seq_task_manager.abort().await; full_node_task_manager.abort().await; @@ -452,6 +471,9 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { } wait_for_l1_block(&da_service, 3, None).await; + //------------------ + // Start nodes + //------------------ let (seq_task_manager, seq_test_client, seq_port) = start_sequencer(&sequencer_db_dir, &da_db_dir, false).await; @@ -463,11 +485,20 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); - fill_blocks(&seq_test_client, &da_service, &addr).await; + fill_blocks( + &seq_test_client, + &da_service, + &addr, + Some(&full_node_test_client), + ) + .await; wait_for_l2_block(&full_node_test_client, 50, None).await; wait_for_l2_block(&batch_prover_test_client, 50, None).await; + //------------------ + // Assert sequencer state + //------------------ let get_balance_result = seq_test_client .eth_get_balance(addr, Some(BlockId::Number(BlockNumberOrTag::Number(50)))) .await; @@ -499,6 +530,9 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { full_node_task_manager.abort().await; batch_prover_task_manager.abort().await; + //------------------ + // Assert fullnode state + //------------------ let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); copy_db_dir_recursive(&full_node_db_dir, &new_full_node_db_dir).unwrap(); @@ -519,10 +553,9 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { .is_some()); // rollback 10 L2 blocks - let rollback_to_l2 = 40; - // We have 8 L1 blocks by now and we want to rollback - // the last one. 
- let rollback_to_l1 = 7; + let rollback_l2_height = 30; + // We have 9 L1 blocks by now and we want to rollback. + let rollback_l1_height = 9; let new_full_node_db_dir = storage_dir.path().join("full-node3").to_path_buf(); copy_db_dir_recursive( @@ -537,14 +570,18 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { ) .unwrap(); + //------------------ + // Rollback nodes + //------------------ let new_sequencer_db_dir = storage_dir.path().join("sequencer3").to_path_buf(); rollback_node( StorageNodeType::Sequencer, SEQUENCER_LEDGER_TABLES, &sequencer_db_dir, &new_sequencer_db_dir, - rollback_to_l2, - rollback_to_l1, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, ) .await .unwrap(); @@ -554,8 +591,9 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { FULL_NODE_LEDGER_TABLES, &full_node_db_dir, &new_full_node_db_dir, - rollback_to_l2, - rollback_to_l1, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, ) .await .unwrap(); @@ -565,90 +603,103 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { BATCH_PROVER_LEDGER_TABLES, &batch_prover_db_dir, &new_batch_prover_db_dir, - rollback_to_l2, - rollback_to_l1, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, ) .await .unwrap(); + //------------------ + // Assert state after re-sync + //------------------ let new_sequencer_db_dir = storage_dir.path().join("sequencer4").to_path_buf(); copy_db_dir_recursive( &storage_dir.path().join("sequencer3"), &new_sequencer_db_dir, ) .unwrap(); - let (seq_task_manager, seq_test_client, seq_port) = - start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; - let new_full_node_db_dir = storage_dir.path().join("full-node4").to_path_buf(); copy_db_dir_recursive( &storage_dir.path().join("full-node3"), &new_full_node_db_dir, ) .unwrap(); - let (full_node_task_manager, full_node_test_client) = - start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; - let 
new_batch_prover_db_dir = storage_dir.path().join("batch-prover4").to_path_buf(); copy_db_dir_recursive( &storage_dir.path().join("batch-prover3"), &new_batch_prover_db_dir, ) .unwrap(); - let (batch_prover_task_manager, batch_prover_test_client) = - start_batch_prover(&new_batch_prover_db_dir, &da_db_dir, seq_port, true).await; - - assert_dbs(&batch_prover_test_client, addr, 40, 40000000000000000000).await; - - for _ in 0..10 { - seq_test_client.spam_publish_batch_request().await.unwrap(); - } - wait_for_l2_block(&seq_test_client, 50, None).await; - wait_for_l2_block(&full_node_test_client, 50, None).await; - wait_for_l2_block(&batch_prover_test_client, 50, None).await; - - seq_task_manager.abort().await; - full_node_task_manager.abort().await; - batch_prover_task_manager.abort().await; - let new_full_node_db_dir = storage_dir.path().join("full-node5").to_path_buf(); - copy_db_dir_recursive( - &storage_dir.path().join("full-node4"), - &new_full_node_db_dir, - ) - .unwrap(); - let new_batch_prover_db_dir = storage_dir.path().join("batch-prover5").to_path_buf(); - copy_db_dir_recursive( - &storage_dir.path().join("batch-prover4"), - &new_batch_prover_db_dir, - ) - .unwrap(); - - // At block 22, verified proof in full node should have been pruned. + // At block 11, verified proof in full node should have been pruned. let (fn_ledger_db, _, _) = instantiate_dbs(&new_full_node_db_dir, FULL_NODE_LEDGER_TABLES).unwrap(); let fn_ledger_db = fn_ledger_db.inner(); assert!(fn_ledger_db - .get::(&SlotNumber(7)) + .get::(&SlotNumber(9)) .unwrap() .is_some()); assert!(fn_ledger_db - .get::(&SlotNumber(9)) + .get::(&SlotNumber(11)) .unwrap() .is_none()); - // At block 22, verified proof in full node should have been pruned. + // At block 11, verified proof in prover should have been pruned. 
let (bp_ledger_db, _, _) = instantiate_dbs(&new_batch_prover_db_dir, BATCH_PROVER_LEDGER_TABLES).unwrap(); let bp_ledger_db = bp_ledger_db.inner(); assert!(bp_ledger_db - .get::(&SlotNumber(7)) + .get::(&SlotNumber(8)) .unwrap() .is_some()); assert!(bp_ledger_db - .get::(&SlotNumber(9)) + .get::(&SlotNumber(10)) .unwrap() .is_none()); + //------------------ + // Start nodes and make sure they are able to sync + //------------------ + let new_sequencer_db_dir = storage_dir.path().join("sequencer5").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("sequencer4"), + &new_sequencer_db_dir, + ) + .unwrap(); + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; + + let new_full_node_db_dir = storage_dir.path().join("full-node5").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("full-node4"), + &new_full_node_db_dir, + ) + .unwrap(); + let (full_node_task_manager, full_node_test_client) = + start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; + + let new_batch_prover_db_dir = storage_dir.path().join("batch-prover5").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("batch-prover4"), + &new_batch_prover_db_dir, + ) + .unwrap(); + let (batch_prover_task_manager, batch_prover_test_client) = + start_batch_prover(&new_batch_prover_db_dir, &da_db_dir, seq_port, true).await; + + assert_dbs(&batch_prover_test_client, addr, 30, 30000000000000000000).await; + + for _ in 0..10 { + seq_test_client.spam_publish_batch_request().await.unwrap(); + } + wait_for_l2_block(&seq_test_client, 40, None).await; + wait_for_l2_block(&full_node_test_client, 40, None).await; + wait_for_l2_block(&batch_prover_test_client, 40, None).await; + + seq_task_manager.abort().await; + full_node_task_manager.abort().await; + batch_prover_task_manager.abort().await; + Ok(()) } From 6ba427aa97b3ffe830f83fd17ceed86d17d8b667 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: 
Wed, 26 Feb 2025 16:39:02 +0300 Subject: [PATCH 35/45] Clippy --- bin/citrea/tests/mock/rollback.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 8827eb7e3f..d01634ab0d 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -170,9 +170,9 @@ async fn rollback_node( rollback_l1_height: u64, commitment_l2_height: u64, ) -> anyhow::Result<()> { - copy_db_dir_recursive(&old_path, &new_path).unwrap(); + copy_db_dir_recursive(old_path, new_path).unwrap(); - let (ledger_db, native_db, state_db) = instantiate_dbs(&new_path, tables).unwrap(); + let (ledger_db, native_db, state_db) = instantiate_dbs(new_path, tables).unwrap(); let rollback = Rollback::new(ledger_db.inner(), state_db.clone(), native_db.clone()); rollback @@ -221,7 +221,7 @@ async fn fill_blocks( async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, balance: u128) { // Check soft confirmations have been rolled back in Ledger DB - wait_for_l2_block(&test_client, at_block, None).await; + wait_for_l2_block(test_client, at_block, None).await; // Suppress output of panics let prev_hook = panic::take_hook(); From 6c6c8375a094bab989e7aebce5e4cbedd80879a7 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Wed, 26 Feb 2025 16:44:53 +0300 Subject: [PATCH 36/45] Disable logs in tests --- bin/citrea/tests/mock/rollback.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index d01634ab0d..46f59dfbe9 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -256,7 +256,7 @@ async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, bala /// Trigger rollback DB data. 
#[tokio::test(flavor = "multi_thread")] async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -321,7 +321,7 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); @@ -455,7 +455,7 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { /// Trigger rollback DB data. #[tokio::test(flavor = "multi_thread")] async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { - citrea::initialize_logging(tracing::Level::DEBUG); + // citrea::initialize_logging(tracing::Level::DEBUG); let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node", "batch-prover"]); let da_db_dir = storage_dir.path().join("DA").to_path_buf(); From 38d3e9b76f184651b1c41fa63a042e997a23dcb3 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 27 Feb 2025 13:04:04 +0300 Subject: [PATCH 37/45] Add test for fullnode rollback/re-sync without rolling back sequencer --- bin/citrea/tests/mock/rollback.rs | 93 +++++++++++++++++++++++++++++++ crates/storage-ops/src/utils.rs | 1 - 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index 46f59dfbe9..ed27259453 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -452,6 +452,99 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { Ok(()) } +/// Trigger rollback DB 
data. +/// This test makes sure that a rollback on fullnode without rolling back sequencer +/// enables fullnode to sync from the rollback point up until latest sequencer block. +#[tokio::test(flavor = "multi_thread")] +async fn test_fullnode_rollback_without_sequencer_rollback() -> Result<(), anyhow::Error> { + // citrea::initialize_logging(tracing::Level::DEBUG); + + let storage_dir = tempdir_with_children(&["DA", "sequencer", "full-node"]); + let da_db_dir = storage_dir.path().join("DA").to_path_buf(); + let sequencer_db_dir = storage_dir.path().join("sequencer").to_path_buf(); + let full_node_db_dir = storage_dir.path().join("full-node").to_path_buf(); + + let da_service = MockDaService::new(MockAddress::default(), &da_db_dir.clone()); + + // start rollup on da block 3 + for _ in 0..3 { + da_service.publish_test_block().await.unwrap(); + } + wait_for_l1_block(&da_service, 3, None).await; + + //------------------ + // Start nodes + //------------------ + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&sequencer_db_dir, &da_db_dir, false).await; + + let (full_node_task_manager, full_node_test_client) = + start_full_node(&full_node_db_dir, &da_db_dir, seq_port, false).await; + + let addr = Address::from_str("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92265").unwrap(); + + //------------------ + // Fill blocks + //------------------ + fill_blocks(&seq_test_client, &da_service, &addr, None).await; + + wait_for_l2_block(&seq_test_client, 50, None).await; + wait_for_l2_block(&full_node_test_client, 50, None).await; + + seq_task_manager.abort().await; + full_node_task_manager.abort().await; + + //------------------ + // Rollback + //------------------ + // rollback 10 L2 blocks + let rollback_l2_height = 30; + // We have 8 L1 blocks by now and we want to rollback + // the last one. 
+ let rollback_l1_height = 6; + + let new_full_node_db_dir = storage_dir.path().join("full-node2").to_path_buf(); + rollback_node( + StorageNodeType::FullNode, + FULL_NODE_LEDGER_TABLES, + &full_node_db_dir, + &new_full_node_db_dir, + rollback_l2_height, + rollback_l1_height, + rollback_l2_height, + ) + .await + .unwrap(); + + //------------------ + // Make sure nodes are able to sync after rollback + //------------------ + let new_sequencer_db_dir = storage_dir.path().join("sequencer2").to_path_buf(); + copy_db_dir_recursive(&sequencer_db_dir, &new_sequencer_db_dir).unwrap(); + let (seq_task_manager, seq_test_client, seq_port) = + start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; + + let new_full_node_db_dir = storage_dir.path().join("full-node3").to_path_buf(); + copy_db_dir_recursive( + &storage_dir.path().join("full-node2"), + &new_full_node_db_dir, + ) + .unwrap(); + let (full_node_task_manager, full_node_test_client) = + start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; + + for _ in 0..10 { + seq_test_client.spam_publish_batch_request().await.unwrap(); + } + wait_for_l2_block(&seq_test_client, 40, None).await; + wait_for_l2_block(&full_node_test_client, 40, None).await; + + seq_task_manager.abort().await; + full_node_task_manager.abort().await; + + Ok(()) +} + /// Trigger rollback DB data. 
#[tokio::test(flavor = "multi_thread")] async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { diff --git a/crates/storage-ops/src/utils.rs b/crates/storage-ops/src/utils.rs index 1b4f0f53d1..68c12f5f3e 100644 --- a/crates/storage-ops/src/utils.rs +++ b/crates/storage-ops/src/utils.rs @@ -75,7 +75,6 @@ fn delete_short_header_proofs(ledger_db: &DB, slot_number: SlotNumber) -> anyhow // TODO for pruning this should be less than if record.value > slot_number { - println!("Deleting slot short proof: {:?}", record.key); ledger_db.delete::(&record.key)?; } } From 57d95a125dc8341f376dfccc5d646114bba80df5 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 27 Feb 2025 14:16:37 +0300 Subject: [PATCH 38/45] Check sequencer commitments --- bin/citrea/tests/mock/rollback.rs | 62 ++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 14 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index ed27259453..c8b3c2bf9b 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -23,6 +23,7 @@ use sov_db::schema::tables::{ use sov_db::schema::types::SlotNumber; use sov_db::state_db::StateDB; use sov_mock_da::{MockAddress, MockDaService}; +use sov_rollup_interface::rpc::SequencerCommitmentResponse; use crate::common::client::TestClient; use crate::common::helpers::{ @@ -219,9 +220,15 @@ async fn fill_blocks( } } -async fn assert_dbs(test_client: &TestClient, addr: Address, at_block: u64, balance: u128) { +async fn assert_dbs( + test_client: &TestClient, + addr: Address, + check_l1_block: Option, + check_l2_block: u64, + balance_at_l2_height: u128, +) { // Check soft confirmations have been rolled back in Ledger DB - wait_for_l2_block(test_client, at_block, None).await; + wait_for_l2_block(test_client, check_l2_block, None).await; // Suppress output of panics let prev_hook = panic::take_hook(); @@ -231,26 +238,39 @@ async fn assert_dbs(test_client: &TestClient, addr: Address, 
at_block: u64, bala let get_balance_result = test_client .eth_get_balance( addr, - Some(BlockId::Number(BlockNumberOrTag::Number(at_block))), + Some(BlockId::Number(BlockNumberOrTag::Number(check_l2_block))), ) .await; assert!(get_balance_result.is_ok()); - assert_eq!(get_balance_result.unwrap(), U256::from(balance)); + assert_eq!( + get_balance_result.unwrap(), + U256::from(balance_at_l2_height) + ); // Check native DB is rolled back - let check_block_by_number_result = AssertUnwindSafe( - test_client - .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(at_block + 1))), - ) - .catch_unwind() - .await; + let check_block_by_number_result = + AssertUnwindSafe(test_client.eth_get_block_by_number_with_detail(Some( + BlockNumberOrTag::Number(check_l2_block + 1), + ))) + .catch_unwind() + .await; assert!(check_block_by_number_result.is_err()); panic::set_hook(prev_hook); // Should NOT panic as the data we're requesting here is correct test_client - .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(at_block))) + .eth_get_block_by_number_with_detail(Some(BlockNumberOrTag::Number(check_l2_block))) .await; + + let Some(check_l1_block) = check_l1_block else { + return; + }; + let commitments: Vec = test_client + .ledger_get_sequencer_commitments_on_slot_by_number(check_l1_block) + .await + .unwrap() + .unwrap(); + assert_eq!(commitments.len(), 1); } /// Trigger rollback DB data. 
@@ -311,7 +331,7 @@ async fn test_sequencer_rollback() -> Result<(), anyhow::Error> { let (seq_task_manager, seq_test_client, _) = start_sequencer(&new_sequencer_db_dir, &da_db_dir, true).await; - assert_dbs(&seq_test_client, addr, 30, 30000000000000000000).await; + assert_dbs(&seq_test_client, addr, None, 30, 30000000000000000000).await; seq_task_manager.abort().await; @@ -438,7 +458,14 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { let (full_node_task_manager, full_node_test_client) = start_full_node(&new_full_node_db_dir, &da_db_dir, seq_port, true).await; - assert_dbs(&full_node_test_client, addr, 30, 30000000000000000000).await; + assert_dbs( + &full_node_test_client, + addr, + rollback_l1_height, + 30, + 30000000000000000000, + ) + .await; for _ in 0..10 { seq_test_client.spam_publish_batch_request().await.unwrap(); @@ -781,7 +808,14 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> { let (batch_prover_task_manager, batch_prover_test_client) = start_batch_prover(&new_batch_prover_db_dir, &da_db_dir, seq_port, true).await; - assert_dbs(&batch_prover_test_client, addr, 30, 30000000000000000000).await; + assert_dbs( + &batch_prover_test_client, + addr, + None, + 30, + 30000000000000000000, + ) + .await; for _ in 0..10 { seq_test_client.spam_publish_batch_request().await.unwrap(); From 55dd86f767d05252675e4306342cabab7c30af59 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 27 Feb 2025 14:16:51 +0300 Subject: [PATCH 39/45] Move check outside the loop --- .../components/ledger_db/soft_confirmations.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs index 72b534f6dd..abeaae433a 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs +++ 
b/crates/storage-ops/src/rollback/components/ledger_db/soft_confirmations.rs @@ -29,15 +29,6 @@ pub(crate) fn rollback_soft_confirmations( break; } - if matches!(node_type, StorageNodeType::Sequencer) - || matches!(node_type, StorageNodeType::FullNode) - { - ledger_db.put::( - &(), - &SoftConfirmationNumber(last_sequencer_commitment_l2_height), - )?; - } - delete_soft_confirmations_by_number( node_type, ledger_db, @@ -48,5 +39,14 @@ pub(crate) fn rollback_soft_confirmations( deleted += 1; } + if matches!(node_type, StorageNodeType::Sequencer) + || matches!(node_type, StorageNodeType::FullNode) + { + ledger_db.put::( + &(), + &SoftConfirmationNumber(last_sequencer_commitment_l2_height), + )?; + } + Ok(deleted) } From 3048538ccbc9085da4ed2f0f78bae36dde875772 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 27 Feb 2025 14:27:14 +0300 Subject: [PATCH 40/45] Stop using L1RangeByl1Height --- bin/citrea/tests/mock/rollback.rs | 2 +- .../rollback/components/ledger_db/slots.rs | 10 +++--- crates/storage-ops/src/utils.rs | 36 +++++++------------ 3 files changed, 18 insertions(+), 30 deletions(-) diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs index c8b3c2bf9b..bee8da834e 100644 --- a/bin/citrea/tests/mock/rollback.rs +++ b/bin/citrea/tests/mock/rollback.rs @@ -461,7 +461,7 @@ async fn test_fullnode_rollback() -> Result<(), anyhow::Error> { assert_dbs( &full_node_test_client, addr, - rollback_l1_height, + Some(rollback_l1_height), 30, 30000000000000000000, ) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index 2e9c285635..5cc9bafd56 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -1,4 +1,4 @@ -use sov_db::schema::tables::L2RangeByL1Height; +use sov_db::schema::tables::CommitmentsByNumber; use sov_db::schema::types::SlotNumber; use 
sov_schema_db::{ScanDirection, DB}; @@ -10,12 +10,12 @@ pub(crate) fn rollback_slots( ledger_db: &DB, target_l1: u64, ) -> anyhow::Result { - let mut slots_to_l2_range = ledger_db - .iter_with_direction::(Default::default(), ScanDirection::Backward)?; - slots_to_l2_range.seek_to_last(); + let mut commitments_by_number = ledger_db + .iter_with_direction::(Default::default(), ScanDirection::Backward)?; + commitments_by_number.seek_to_last(); let mut deleted = 0; - for record in slots_to_l2_range { + for record in commitments_by_number { let Ok(record) = record else { continue; }; diff --git a/crates/storage-ops/src/utils.rs b/crates/storage-ops/src/utils.rs index 68c12f5f3e..7a6643f33e 100644 --- a/crates/storage-ops/src/utils.rs +++ b/crates/storage-ops/src/utils.rs @@ -40,11 +40,8 @@ pub(crate) fn delete_slots_by_number( ledger_db.delete::(&slot_number)?; ledger_db.delete::(&slot_number)?; - if !matches!(node_type, StorageNodeType::LightClient) { - delete_short_header_proofs(ledger_db, slot_number)?; - } if !matches!(node_type, StorageNodeType::Sequencer) { - delete_slot_by_hash(ledger_db, slot_number)?; + delete_slot_by_hash(node_type, ledger_db, slot_number)?; } if matches!(node_type, StorageNodeType::FullNode) { @@ -63,7 +60,11 @@ pub(crate) fn delete_slots_by_number( Ok(()) } -fn delete_short_header_proofs(ledger_db: &DB, slot_number: SlotNumber) -> anyhow::Result<()> { +fn delete_slot_by_hash( + node_type: StorageNodeType, + ledger_db: &DB, + slot_number: SlotNumber, +) -> anyhow::Result<()> { let mut slots = ledger_db.iter_with_direction::(Default::default(), ScanDirection::Backward)?; slots.seek_to_last(); @@ -73,28 +74,15 @@ fn delete_short_header_proofs(ledger_db: &DB, slot_number: SlotNumber) -> anyhow continue; }; - // TODO for pruning this should be less than - if record.value > slot_number { - ledger_db.delete::(&record.key)?; + if record.value < slot_number { + break; } - } - - Ok(()) -} -fn delete_slot_by_hash(ledger_db: &DB, slot_number: SlotNumber) 
-> anyhow::Result<()> { - let mut slots = - ledger_db.iter_with_direction::(Default::default(), ScanDirection::Forward)?; - slots.seek_to_first(); - - for record in slots { - let Ok(record) = record else { - continue; - }; - - if record.value < slot_number { - ledger_db.delete::(&record.key)?; + if !matches!(node_type, StorageNodeType::LightClient) { + ledger_db.delete::(&record.key)?; } + + ledger_db.delete::(&record.key)?; } Ok(()) From 4ecdd0bc0f2e1b6ab762f1369daa70041209db3f Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 27 Feb 2025 14:30:08 +0300 Subject: [PATCH 41/45] Rollback light client slots differently --- .../src/rollback/components/ledger_db/mod.rs | 14 ++++++-- .../rollback/components/ledger_db/slots.rs | 33 ++++++++++++++++++- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs index 762e4f31b5..636b17d887 100644 --- a/crates/storage-ops/src/rollback/components/ledger_db/mod.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use slots::rollback_slots; +use slots::{rollback_light_client_slots, rollback_slots}; use soft_confirmations::rollback_soft_confirmations; use tracing::debug; @@ -32,5 +32,15 @@ pub(crate) fn rollback_ledger_db( last_sequencer_commitment_l2_height, ) ); - log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1,)); + match node_type { + StorageNodeType::LightClient => { + log_result_or_error!( + "slots", + rollback_light_client_slots(node_type, &ledger_db, target_l1,) + ); + } + _ => { + log_result_or_error!("slots", rollback_slots(node_type, &ledger_db, target_l1,)); + } + } } diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs index 5cc9bafd56..a6b98b6696 100644 --- 
a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs +++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs @@ -1,4 +1,4 @@ -use sov_db::schema::tables::CommitmentsByNumber; +use sov_db::schema::tables::{CommitmentsByNumber, LightClientProofBySlotNumber}; use sov_db::schema::types::SlotNumber; use sov_schema_db::{ScanDirection, DB}; @@ -33,3 +33,34 @@ pub(crate) fn rollback_slots( Ok(deleted) } + +pub(crate) fn rollback_light_client_slots( + node_type: StorageNodeType, + ledger_db: &DB, + target_l1: u64, +) -> anyhow::Result { + let mut proof_by_slot_number = ledger_db.iter_with_direction::( + Default::default(), + ScanDirection::Backward, + )?; + proof_by_slot_number.seek_to_last(); + + let mut deleted = 0; + for record in proof_by_slot_number { + let Ok(record) = record else { + continue; + }; + + let slot_height = record.key; + + if slot_height <= SlotNumber(target_l1) { + break; + } + + delete_slots_by_number(node_type, ledger_db, slot_height)?; + + deleted += 1; + } + + Ok(deleted) +} From cdee744fd51de4b93f52b78fb34e19ec7562b205 Mon Sep 17 00:00:00 2001 From: Rakan Alhneiti Date: Thu, 27 Feb 2025 14:58:53 +0300 Subject: [PATCH 42/45] Iterate verified proofs --- crates/storage-ops/src/utils.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/crates/storage-ops/src/utils.rs b/crates/storage-ops/src/utils.rs index 7a6643f33e..820ab1e0c6 100644 --- a/crates/storage-ops/src/utils.rs +++ b/crates/storage-ops/src/utils.rs @@ -45,7 +45,7 @@ pub(crate) fn delete_slots_by_number( } if matches!(node_type, StorageNodeType::FullNode) { - ledger_db.delete::(&slot_number)?; + delete_verified_proofs_by_slot_number(ledger_db, slot_number)?; } if matches!(node_type, StorageNodeType::BatchProver) { @@ -87,3 +87,26 @@ fn delete_slot_by_hash( Ok(()) } + +fn delete_verified_proofs_by_slot_number( + ledger_db: &DB, + slot_number: SlotNumber, +) -> anyhow::Result<()> { + let mut verified_proofs_by_number = ledger_db 
+        .iter_with_direction::<VerifiedBatchProofsBySlotNumber>(
+            Default::default(),
+            ScanDirection::Backward,
+        )?;
+    verified_proofs_by_number.seek_to_last();
+
+    for record in verified_proofs_by_number {
+        let Ok(record) = record else {
+            continue;
+        };
+        if record.key >= slot_number {
+            ledger_db.delete::<VerifiedBatchProofsBySlotNumber>(&record.key)?;
+        }
+    }
+
+    Ok(())
+}

From 6c2c770b89c32226f51750913072d85f2279c12c Mon Sep 17 00:00:00 2001
From: Rakan Alhneiti
Date: Fri, 28 Feb 2025 11:23:43 +0300
Subject: [PATCH 43/45] Differentiate pruning logic from rollback

---
 .../src/pruning/components/ledger_db/slots.rs | 68 ++++++++++++++++++-
 .../rollback/components/ledger_db/slots.rs | 64 ++++++++++++++++-
 crates/storage-ops/src/utils.rs | 66 +-----------------
 3 files changed, 132 insertions(+), 66 deletions(-)

diff --git a/crates/storage-ops/src/pruning/components/ledger_db/slots.rs b/crates/storage-ops/src/pruning/components/ledger_db/slots.rs
index c3e9778ae6..c8c76939d6 100644
--- a/crates/storage-ops/src/pruning/components/ledger_db/slots.rs
+++ b/crates/storage-ops/src/pruning/components/ledger_db/slots.rs
@@ -1,5 +1,7 @@
-use sov_db::schema::tables::L2RangeByL1Height;
-use sov_db::schema::types::SoftConfirmationNumber;
+use sov_db::schema::tables::{
+    L2RangeByL1Height, ShortHeaderProofBySlotHash, SlotByHash, VerifiedBatchProofsBySlotNumber,
+};
+use sov_db::schema::types::{SlotNumber, SoftConfirmationNumber};
 use sov_schema_db::{ScanDirection, DB};
 
 use crate::pruning::types::StorageNodeType;
@@ -29,8 +31,70 @@ pub(crate) fn prune_slots(
 
         delete_slots_by_number(node_type, ledger_db, slot_height)?;
 
+        if !matches!(node_type, StorageNodeType::Sequencer) {
+            prune_slot_by_hash(node_type, ledger_db, slot_height)?;
+        }
+
+        if matches!(node_type, StorageNodeType::FullNode) {
+            prune_verified_proofs_by_slot_number(ledger_db, slot_height)?;
+        }
+
         deleted += 1;
     }
 
     Ok(deleted)
 }
+
+fn prune_slot_by_hash(
+    node_type: StorageNodeType,
+    ledger_db: &DB,
+    slot_number: SlotNumber,
+) -> anyhow::Result<()> {
+    let mut slots =
+        
ledger_db.iter_with_direction::<SlotByHash>(Default::default(), ScanDirection::Forward)?;
+    slots.seek_to_first();
+
+    for record in slots {
+        let Ok(record) = record else {
+            continue;
+        };
+
+        if record.value > slot_number {
+            break;
+        }
+
+        if !matches!(node_type, StorageNodeType::LightClient) {
+            ledger_db.delete::<ShortHeaderProofBySlotHash>(&record.key)?;
+        }
+
+        ledger_db.delete::<SlotByHash>(&record.key)?;
+    }
+
+    Ok(())
+}
+
+fn prune_verified_proofs_by_slot_number(
+    ledger_db: &DB,
+    slot_number: SlotNumber,
+) -> anyhow::Result<()> {
+    let mut verified_proofs_by_number = ledger_db
+        .iter_with_direction::<VerifiedBatchProofsBySlotNumber>(
+            Default::default(),
+            ScanDirection::Forward,
+        )?;
+    verified_proofs_by_number.seek_to_first();
+
+    for record in verified_proofs_by_number {
+        let Ok(record) = record else {
+            continue;
+        };
+
+        if record.key > slot_number {
+            break;
+        }
+
+        ledger_db.delete::<VerifiedBatchProofsBySlotNumber>(&record.key)?;
+    }
+
+    Ok(())
+}
diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs
index a6b98b6696..1c65e3cadf 100644
--- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs
+++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs
@@ -1,4 +1,7 @@
-use sov_db::schema::tables::{CommitmentsByNumber, LightClientProofBySlotNumber};
+use sov_db::schema::tables::{
+    CommitmentsByNumber, LightClientProofBySlotNumber, ShortHeaderProofBySlotHash, SlotByHash,
+    VerifiedBatchProofsBySlotNumber,
+};
 use sov_db::schema::types::SlotNumber;
 use sov_schema_db::{ScanDirection, DB};
 
@@ -59,8 +62,67 @@ pub(crate) fn rollback_light_client_slots(
 
         delete_slots_by_number(node_type, ledger_db, slot_height)?;
 
+        if !matches!(node_type, StorageNodeType::Sequencer) {
+            rollback_slot_by_hash(node_type, ledger_db, slot_height)?;
+        }
+
+        if matches!(node_type, StorageNodeType::FullNode) {
+            rollback_verified_proofs_by_slot_number(ledger_db, slot_height)?;
+        }
+
         deleted += 1;
     }
 
     Ok(deleted)
 }
+
+fn rollback_slot_by_hash(
+    node_type: StorageNodeType,
+    ledger_db:
&DB,
+    slot_number: SlotNumber,
+) -> anyhow::Result<()> {
+    let mut slots =
+        ledger_db.iter_with_direction::<SlotByHash>(Default::default(), ScanDirection::Backward)?;
+    slots.seek_to_last();
+
+    for record in slots {
+        let Ok(record) = record else {
+            continue;
+        };
+
+        if record.value < slot_number {
+            break;
+        }
+
+        if !matches!(node_type, StorageNodeType::LightClient) {
+            ledger_db.delete::<ShortHeaderProofBySlotHash>(&record.key)?;
+        }
+
+        ledger_db.delete::<SlotByHash>(&record.key)?;
+    }
+
+    Ok(())
+}
+
+fn rollback_verified_proofs_by_slot_number(
+    ledger_db: &DB,
+    slot_number: SlotNumber,
+) -> anyhow::Result<()> {
+    let mut verified_proofs_by_number = ledger_db
+        .iter_with_direction::<VerifiedBatchProofsBySlotNumber>(
+            Default::default(),
+            ScanDirection::Backward,
+        )?;
+    verified_proofs_by_number.seek_to_last();
+
+    for record in verified_proofs_by_number {
+        let Ok(record) = record else {
+            continue;
+        };
+        if record.key >= slot_number {
+            ledger_db.delete::<VerifiedBatchProofsBySlotNumber>(&record.key)?;
+        }
+    }
+
+    Ok(())
+}
diff --git a/crates/storage-ops/src/utils.rs b/crates/storage-ops/src/utils.rs
index 820ab1e0c6..d84e054857 100644
--- a/crates/storage-ops/src/utils.rs
+++ b/crates/storage-ops/src/utils.rs
@@ -1,11 +1,10 @@
 use sov_db::schema::tables::{
     CommitmentsByNumber, L2RangeByL1Height, L2Witness, LightClientProofBySlotNumber,
-    ProofsBySlotNumber, ProofsBySlotNumberV2, ProverStateDiffs, ShortHeaderProofBySlotHash,
-    SlotByHash, SoftConfirmationByHash, SoftConfirmationByNumber, SoftConfirmationStatus,
-    VerifiedBatchProofsBySlotNumber,
+    ProofsBySlotNumber, ProofsBySlotNumberV2, ProverStateDiffs, SoftConfirmationByHash,
+    SoftConfirmationByNumber, SoftConfirmationStatus,
 };
 use sov_db::schema::types::{DbHash, SlotNumber, SoftConfirmationNumber};
-use sov_schema_db::{ScanDirection, DB};
+use sov_schema_db::DB;
 
 use crate::pruning::types::StorageNodeType;
 
@@ -40,14 +39,6 @@
     ledger_db.delete::<CommitmentsByNumber>(&slot_number)?;
     ledger_db.delete::<L2RangeByL1Height>(&slot_number)?;
 
-    if !matches!(node_type, StorageNodeType::Sequencer) {
-        
delete_slot_by_hash(node_type, ledger_db, slot_number)?;
-    }
-
-    if matches!(node_type, StorageNodeType::FullNode) {
-        delete_verified_proofs_by_slot_number(ledger_db, slot_number)?;
-    }
-
     if matches!(node_type, StorageNodeType::BatchProver) {
         ledger_db.delete::<ProofsBySlotNumber>(&slot_number)?;
         ledger_db.delete::<ProofsBySlotNumberV2>(&slot_number)?;
@@ -59,54 +50,3 @@
 
     Ok(())
 }
-
-fn delete_slot_by_hash(
-    node_type: StorageNodeType,
-    ledger_db: &DB,
-    slot_number: SlotNumber,
-) -> anyhow::Result<()> {
-    let mut slots =
-        ledger_db.iter_with_direction::<SlotByHash>(Default::default(), ScanDirection::Backward)?;
-    slots.seek_to_last();
-
-    for record in slots {
-        let Ok(record) = record else {
-            continue;
-        };
-
-        if record.value < slot_number {
-            break;
-        }
-
-        if !matches!(node_type, StorageNodeType::LightClient) {
-            ledger_db.delete::<ShortHeaderProofBySlotHash>(&record.key)?;
-        }
-
-        ledger_db.delete::<SlotByHash>(&record.key)?;
-    }
-
-    Ok(())
-}
-
-fn delete_verified_proofs_by_slot_number(
-    ledger_db: &DB,
-    slot_number: SlotNumber,
-) -> anyhow::Result<()> {
-    let mut verified_proofs_by_number = ledger_db
-        .iter_with_direction::<VerifiedBatchProofsBySlotNumber>(
-            Default::default(),
-            ScanDirection::Backward,
-        )?;
-    verified_proofs_by_number.seek_to_last();
-
-    for record in verified_proofs_by_number {
-        let Ok(record) = record else {
-            continue;
-        };
-        if record.key >= slot_number {
-            ledger_db.delete::<VerifiedBatchProofsBySlotNumber>(&record.key)?;
-        }
-    }
-
-    Ok(())
-}

From 001b1ab4cbb17a8ec05b6f7fe90462ff2aae0f2c Mon Sep 17 00:00:00 2001
From: Rakan Alhneiti
Date: Fri, 28 Feb 2025 11:26:57 +0300
Subject: [PATCH 44/45] Compare state roots

---
 bin/citrea/tests/mock/rollback.rs | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs
index bee8da834e..6523ac601b 100644
--- a/bin/citrea/tests/mock/rollback.rs
+++ b/bin/citrea/tests/mock/rollback.rs
@@ -566,6 +566,22 @@ async fn test_fullnode_rollback_without_sequencer_rollback() -> Result<(), anyho
     wait_for_l2_block(&seq_test_client, 40,
None).await;
     wait_for_l2_block(&full_node_test_client, 40, None).await;
 
+    let seq_soft_confirmation = seq_test_client
+        .ledger_get_head_soft_confirmation()
+        .await
+        .unwrap()
+        .unwrap();
+    let full_node_soft_confirmation = full_node_test_client
+        .ledger_get_head_soft_confirmation()
+        .await
+        .unwrap()
+        .unwrap();
+
+    assert_eq!(
+        seq_soft_confirmation.state_root,
+        full_node_soft_confirmation.state_root
+    );
+
     seq_task_manager.abort().await;
     full_node_task_manager.abort().await;

From 0345ec8d740ca50bc53e9c81ebcb1ba1fd917f44 Mon Sep 17 00:00:00 2001
From: Rakan Alhneiti
Date: Fri, 28 Feb 2025 13:09:14 +0300
Subject: [PATCH 45/45] Fix prover test

---
 bin/citrea/tests/mock/rollback.rs | 2 +-
 .../rollback/components/ledger_db/slots.rs | 23 +++++++++++--------
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/bin/citrea/tests/mock/rollback.rs b/bin/citrea/tests/mock/rollback.rs
index 6523ac601b..41dbf8d707 100644
--- a/bin/citrea/tests/mock/rollback.rs
+++ b/bin/citrea/tests/mock/rollback.rs
@@ -747,7 +747,7 @@ async fn test_batch_prover_rollback() -> Result<(), anyhow::Error> {
         .unwrap();
 
     //------------------
-    // Assert state after re-sync
+    // Assert state after rollback
    //------------------
     let new_sequencer_db_dir = storage_dir.path().join("sequencer4").to_path_buf();
     copy_db_dir_recursive(
diff --git a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs
index 1c65e3cadf..fca2f999f1 100644
--- a/crates/storage-ops/src/rollback/components/ledger_db/slots.rs
+++ b/crates/storage-ops/src/rollback/components/ledger_db/slots.rs
@@ -31,6 +31,14 @@ pub(crate) fn rollback_slots(
 
         delete_slots_by_number(node_type, ledger_db, slot_height)?;
 
+        if !matches!(node_type, StorageNodeType::Sequencer) {
+            rollback_slot_by_hash(node_type, ledger_db, slot_height)?;
+        }
+
+        if matches!(node_type, StorageNodeType::FullNode) {
+            rollback_verified_proofs_by_slot_number(ledger_db, slot_height)?;
+        }
+
         deleted += 1;
     }
@@ -62,14 +70,6 @@ pub(crate) fn rollback_light_client_slots(
 
         delete_slots_by_number(node_type, ledger_db, slot_height)?;
 
-        if !matches!(node_type, StorageNodeType::Sequencer) {
-            rollback_slot_by_hash(node_type, ledger_db, slot_height)?;
-        }
-
-        if matches!(node_type, StorageNodeType::FullNode) {
-            rollback_verified_proofs_by_slot_number(ledger_db, slot_height)?;
-        }
-
         deleted += 1;
     }
 
@@ -119,9 +119,12 @@ fn rollback_verified_proofs_by_slot_number(
         let Ok(record) = record else {
             continue;
         };
-        if record.key >= slot_number {
-            ledger_db.delete::<VerifiedBatchProofsBySlotNumber>(&record.key)?;
+
+        if record.key < slot_number {
+            break;
         }
+
+        ledger_db.delete::<VerifiedBatchProofsBySlotNumber>(&record.key)?;
     }
 
     Ok(())