Remove channel that sends roots to BlockstoreCleanupService (solana-labs#35211)

Currently, ReplayStage sends new roots to BlockstoreCleanupService, and
BlockstoreCleanupService decides when to clean based on advancement of
the latest root. This is unnecessary: the latest root is already cached
by the Blockstore, so the value can simply be fetched from there.

This change removes the channel completely and instead fetches the
latest root from the Blockstore directly. Moreover, logic is added to
check the latest root less frequently, based on the configured purge
interval.

All in all, we went from sending more than 100 slots/min across a
crossbeam channel to reading an atomic roughly 3 times/min, while also
removing the need for an additional thread to read from the channel.
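
To illustrate the shape of the new approach, here is a minimal, self-contained Rust sketch of a cleanup loop that polls a cached root instead of receiving every new root over a channel. The names below (MockBlockstore, max_root, PURGE_SLOT_INTERVAL, LOOP_SLEEP) are stand-ins chosen for this example, not the exact solana-ledger API or configured values.

use std::sync::{
    atomic::{AtomicBool, AtomicU64, Ordering},
    Arc,
};
use std::thread;
use std::time::Duration;

// Stand-in for Blockstore: the latest root is a cached atomic that the
// replay path keeps up to date, so reading it costs a single atomic load.
struct MockBlockstore {
    max_root: AtomicU64,
}

impl MockBlockstore {
    fn max_root(&self) -> u64 {
        self.max_root.load(Ordering::Relaxed)
    }
}

// Hypothetical constants: how often the loop wakes up, and how far the
// root must advance before another purge attempt is made.
const LOOP_SLEEP: Duration = Duration::from_secs(10);
const PURGE_SLOT_INTERVAL: u64 = 512;

fn cleanup_loop(blockstore: Arc<MockBlockstore>, exit: Arc<AtomicBool>) {
    let mut last_purge_slot = 0;
    while !exit.load(Ordering::Relaxed) {
        let root = blockstore.max_root();
        // Only consider cleaning once the root has advanced by a full
        // purge interval; this keeps the work to a handful of atomic
        // reads per minute instead of one channel message per new root.
        if root >= last_purge_slot + PURGE_SLOT_INTERVAL {
            // ... purge ledger data beyond the retention limit here ...
            last_purge_slot = root;
        }
        thread::sleep(LOOP_SLEEP);
    }
}

Because cleanup only needs to observe the root at purge-interval granularity, a coarse poll of the cached value is sufficient, which is what allows both the channel and the thread dedicated to draining it to be removed.
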
steviez authored Feb 21, 2024
1 parent 5c04a97 commit 4905076
Showing 3 changed files with 74 additions and 136 deletions.
9 changes: 0 additions & 9 deletions core/src/replay_stage.rs
@@ -281,7 +281,6 @@ pub struct ReplayStageConfig {
pub exit: Arc<AtomicBool>,
pub rpc_subscriptions: Arc<RpcSubscriptions>,
pub leader_schedule_cache: Arc<LeaderScheduleCache>,
- pub latest_root_senders: Vec<Sender<Slot>>,
pub accounts_background_request_sender: AbsRequestSender,
pub block_commitment_cache: Arc<RwLock<BlockCommitmentCache>>,
pub transaction_status_sender: Option<TransactionStatusSender>,
@@ -551,7 +550,6 @@ impl ReplayStage {
exit,
rpc_subscriptions,
leader_schedule_cache,
- latest_root_senders,
accounts_background_request_sender,
block_commitment_cache,
transaction_status_sender,
@@ -951,7 +949,6 @@ impl ReplayStage {
&leader_schedule_cache,
&lockouts_sender,
&accounts_background_request_sender,
- &latest_root_senders,
&rpc_subscriptions,
&block_commitment_cache,
&mut heaviest_subtree_fork_choice,
@@ -2230,7 +2227,6 @@ impl ReplayStage {
leader_schedule_cache: &Arc<LeaderScheduleCache>,
lockouts_sender: &Sender<CommitmentAggregationData>,
accounts_background_request_sender: &AbsRequestSender,
- latest_root_senders: &[Sender<Slot>],
rpc_subscriptions: &Arc<RpcSubscriptions>,
block_commitment_cache: &Arc<RwLock<BlockCommitmentCache>>,
heaviest_subtree_fork_choice: &mut HeaviestSubtreeForkChoice,
@@ -2319,11 +2315,6 @@
.unwrap_or_else(|err| warn!("bank_notification_sender failed: {:?}", err));
}
}
- latest_root_senders.iter().for_each(|s| {
-     if let Err(e) = s.send(new_root) {
-         trace!("latest root send failed: {:?}", e);
-     }
- });
info!("new root {}", new_root);
}

9 changes: 1 addition & 8 deletions core/src/tvu.rs
@@ -246,14 +246,12 @@ impl Tvu {
exit.clone(),
);

- let (blockstore_cleanup_slot_sender, blockstore_cleanup_slot_receiver) = unbounded();
let replay_stage_config = ReplayStageConfig {
vote_account: *vote_account,
authorized_voter_keypairs,
exit: exit.clone(),
rpc_subscriptions: rpc_subscriptions.clone(),
leader_schedule_cache: leader_schedule_cache.clone(),
- latest_root_senders: vec![blockstore_cleanup_slot_sender],
accounts_background_request_sender,
block_commitment_cache,
transaction_status_sender,
@@ -322,12 +320,7 @@ impl Tvu {
)?;

let blockstore_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| {
- BlockstoreCleanupService::new(
-     blockstore_cleanup_slot_receiver,
-     blockstore.clone(),
-     max_ledger_shreds,
-     exit.clone(),
- )
+ BlockstoreCleanupService::new(blockstore.clone(), max_ledger_shreds, exit.clone())
});

let duplicate_shred_listener = DuplicateShredListener::new(