Skip to content

Commit

Permalink
move window "cover" logic into WindowManager
Browse files Browse the repository at this point in the history
  • Loading branch information
michaelsutton committed Nov 28, 2024
1 parent bbd506a commit bc15537
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 26 deletions.
2 changes: 1 addition & 1 deletion consensus/src/model/stores/block_window_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use std::{
sync::Arc,
};

#[derive(Clone, Copy, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum WindowOrigin {
Full,
Sampled,
Expand Down
27 changes: 2 additions & 25 deletions consensus/src/processes/pruning_proof/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -278,32 +278,9 @@ impl PruningProofManager {
// PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel)

for anticone_block in anticone.iter().copied() {
let mut ghostdag = self.ghostdag_store.get_data(anticone_block).unwrap();
let ghostdag = self.ghostdag_store.get_data(anticone_block).unwrap();
let window = self.window_manager.block_window(&ghostdag, WindowType::DifficultyWindow).unwrap();

// Make sure we extract a full consecutive window containing all blocks required to restore the (possibly sampled) window.
// In the sampling case, the mechanism relies on DAA indexes which can only be calculated correctly if the full
// mergesets covering all sampled blocks are sent.
let cover = match self.window_manager.sampling(ghostdag.selected_parent) {
true => {
// Tracks the window blocks to make sure we visit all blocks
let mut unvisited: BlockHashSet = window.iter().map(|b| b.0.hash).collect();
let capacity_estimate =
window.len() * self.window_manager.sample_rate(&ghostdag, WindowType::DifficultyWindow) as usize;
// The full consecutive window covering all sampled window blocks and the full mergesets containing them
let mut cover = Vec::with_capacity(capacity_estimate);
while !unvisited.is_empty() {
assert!(!ghostdag.selected_parent.is_origin(), "unvisited still not empty");
for merged in ghostdag.unordered_mergeset() {
cover.push(merged);
unvisited.remove(&merged);
}
ghostdag = self.ghostdag_store.get_data(ghostdag.selected_parent).unwrap();
}
cover
}
false => window.iter().map(|b| b.0.hash).collect(),
};
let cover = self.window_manager.consecutive_cover_for_window(ghostdag, &window);

for hash in cover {
if let Entry::Vacant(e) = daa_window_blocks.entry(hash) {
Expand Down
47 changes: 47 additions & 0 deletions consensus/src/processes/window.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,9 @@ pub trait WindowManager {
fn estimate_network_hashes_per_second(&self, window: Arc<BlockWindowHeap>) -> DifficultyResult<u64>;
fn window_size(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> usize;
fn sample_rate(&self, ghostdag_data: &GhostdagData, window_type: WindowType) -> u64;

/// Returns the full consecutive sub-DAG containing all blocks required to restore the (possibly sampled) window.
fn consecutive_cover_for_window(&self, ghostdag_data: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash>;
}

trait AffiliatedWindowCacheReader {
Expand Down Expand Up @@ -273,6 +276,11 @@ impl<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter,
fn sample_rate(&self, _ghostdag_data: &GhostdagData, _window_type: WindowType) -> u64 {
1
}

fn consecutive_cover_for_window(&self, _ghostdag_data: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash> {
    // A full (un-sampled) window already contains every block required to restore it,
    // so the consecutive cover is simply the window's own block hashes.
    assert_eq!(WindowOrigin::Full, window.origin());
    let mut hashes = Vec::with_capacity(window.len());
    for entry in window.iter() {
        hashes.push(entry.0.hash);
    }
    hashes
}
}

type DaaStatus = Option<(u64, BlockHashSet)>;
Expand Down Expand Up @@ -610,6 +618,38 @@ impl<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter,
WindowType::VaryingWindow(_) => 1,
}
}

fn consecutive_cover_for_window(&self, mut ghostdag: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash> {
    assert_eq!(WindowOrigin::Sampled, window.origin());

    // Sampled windows rely on DAA indexes, and those indexes can only be computed correctly
    // on the receiving side if the full mergesets covering all sampled blocks are included.

    // Window blocks we still need to encounter while descending the selected chain.
    let mut remaining: BlockHashSet = window.iter().map(|b| b.0.hash).collect();
    // The full consecutive cover: every mergeset along the chain until all samples are visited.
    let mut cover = Vec::with_capacity(window.len() * self.difficulty_sample_rate as usize);
    if remaining.is_empty() {
        return cover;
    }
    loop {
        assert!(!ghostdag.selected_parent.is_origin(), "unvisited still not empty");
        // TODO (relaxed): a possible optimization is to iterate in the same order as
        // sampled_mergeset_iterator (descending_mergeset) and break once all samples from
        // this mergeset are reached. This is sufficient because the prefix of the mergeset
        // required for obtaining the DAA index of every sampled block is still sent, and it
        // might exclude deeply merged blocks — reducing the number of trusted blocks sent
        // to a fresh syncing peer.
        for merged in ghostdag.unordered_mergeset() {
            cover.push(merged);
            remaining.remove(&merged);
        }
        if remaining.is_empty() {
            return cover;
        }
        // Keep walking down the selected chain until every sampled block is covered.
        ghostdag = self.ghostdag_store.get_data(ghostdag.selected_parent).unwrap();
    }
}
}

/// A window manager handling either full (un-sampled) or sampled windows depending on an activation DAA score
Expand Down Expand Up @@ -751,6 +791,13 @@ impl<T: GhostdagStoreReader, U: BlockWindowCacheReader + BlockWindowCacheWriter,
false => self.full_window_manager.sample_rate(ghostdag_data, window_type),
}
}

fn consecutive_cover_for_window(&self, ghostdag_data: Arc<GhostdagData>, window: &BlockWindowHeap) -> Vec<Hash> {
    // Dispatch on the window's recorded origin, which identifies the manager that built it.
    match window.origin() {
        WindowOrigin::Full => self.full_window_manager.consecutive_cover_for_window(ghostdag_data, window),
        WindowOrigin::Sampled => self.sampled_window_manager.consecutive_cover_for_window(ghostdag_data, window),
    }
}
}

struct BoundedSizeBlockHeap {
Expand Down

0 comments on commit bc15537

Please sign in to comment.