Wen restart aggregate last voted fork slots #33892
Changes from 10 commits
@@ -213,6 +213,8 @@ pub struct RepairInfo {
     pub repair_validators: Option<HashSet<Pubkey>>,
     // Validators which should be given priority when serving
     pub repair_whitelist: Arc<RwLock<HashSet<Pubkey>>>,
+    // A given list of slots to repair when in wen_restart
+    pub slots_to_repair_for_wen_restart: Option<Arc<RwLock<Vec<Slot>>>>,
 }

 pub struct RepairSlotRange {
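Since RepairInfo now carries an optional Arc<RwLock<Vec<Slot>>>, the wen_restart logic and RepairService can share one slot list without copying it. A minimal sketch of that wiring, with hypothetical variable names (only the field's type comes from the diff):

```rust
use std::sync::{Arc, RwLock};

type Slot = u64; // matches solana_sdk::clock::Slot

fn main() {
    // Writer handle stays with the wen_restart side; the clone is what
    // would be stored in RepairInfo.slots_to_repair_for_wen_restart.
    // Cloning the Arc only bumps a reference count; the Vec is shared.
    let wen_restart_slots: Arc<RwLock<Vec<Slot>>> = Arc::new(RwLock::new(vec![]));
    let repair_info_field: Option<Arc<RwLock<Vec<Slot>>>> =
        Some(Arc::clone(&wen_restart_slots));

    // Writer: publish the slots that still need repair.
    *wen_restart_slots.write().unwrap() = vec![2, 3, 5, 7];

    // Reader: what RepairService would see through its clone.
    if let Some(slots) = &repair_info_field {
        assert_eq!(*slots.read().unwrap(), vec![2, 3, 5, 7]);
    }
}
```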
@@ -386,17 +388,24 @@ impl RepairService {
         );
         add_votes_elapsed.stop();

-        let repairs = repair_weight.get_best_weighted_repairs(
-            blockstore,
-            root_bank.epoch_stakes_map(),
-            root_bank.epoch_schedule(),
-            MAX_ORPHANS,
-            MAX_REPAIR_LENGTH,
-            MAX_UNKNOWN_LAST_INDEX_REPAIRS,
-            MAX_CLOSEST_COMPLETION_REPAIRS,
-            &mut repair_timing,
-            &mut best_repairs_stats,
-        );
+        let repairs = match repair_info.slots_to_repair_for_wen_restart.clone() {
+            Some(slots_to_repair) => Self::generate_repairs_for_wen_restart(
+                blockstore,
+                MAX_REPAIR_LENGTH,
+                &slots_to_repair.read().unwrap(),
+            ),
+            None => repair_weight.get_best_weighted_repairs(
+                blockstore,
+                root_bank.epoch_stakes_map(),
+                root_bank.epoch_schedule(),
+                MAX_ORPHANS,
+                MAX_REPAIR_LENGTH,
+                MAX_UNKNOWN_LAST_INDEX_REPAIRS,
+                MAX_CLOSEST_COMPLETION_REPAIRS,
+                &mut repair_timing,
+                &mut best_repairs_stats,
+            ),
+        };

         let mut popular_pruned_forks = repair_weight.get_popular_pruned_forks(
             root_bank.epoch_stakes_map(),
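One detail worth noting in the Some arm: `&slots_to_repair.read().unwrap()` passes a temporary RwLockReadGuard by reference, and deref coercion turns `&RwLockReadGuard<Vec<Slot>>` into the `&Vec<Slot>` the helper expects. The guard, and with it the read lock, is dropped when the statement ends, so the lock is held for exactly the duration of the call. A standalone sketch of the pattern (names hypothetical):

```rust
use std::sync::{Arc, RwLock};

// Stand-in for a function taking `&Vec<Slot>`, the way
// generate_repairs_for_wen_restart does in the diff.
fn count_targets(slots: &Vec<u64>) -> usize {
    slots.len()
}

fn main() {
    let shared = Arc::new(RwLock::new(vec![2u64, 3, 5]));
    // The temporary guard from `read().unwrap()` lives only until the
    // end of this statement, so writers are blocked just for the call.
    let n = count_targets(&shared.read().unwrap());
    assert_eq!(n, 3);
}
```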
@@ -612,26 +621,29 @@ impl RepairService {
         slot: Slot,
         slot_meta: &SlotMeta,
         max_repairs: usize,
+        add_delay_after_first_shred: bool,
     ) -> Vec<ShredRepairType> {
         if max_repairs == 0 || slot_meta.is_full() {
             vec![]
         } else if slot_meta.consumed == slot_meta.received {
-            // check delay time of last shred
-            if let Some(reference_tick) = slot_meta
-                .received
-                .checked_sub(1)
-                .and_then(|index| blockstore.get_data_shred(slot, index).ok()?)
-                .and_then(|shred| shred::layout::get_reference_tick(&shred).ok())
-                .map(u64::from)
-            {
-                // System time is not monotonic
-                let ticks_since_first_insert = DEFAULT_TICKS_PER_SECOND
-                    * timestamp().saturating_sub(slot_meta.first_shred_timestamp)
-                    / 1_000;
-                if ticks_since_first_insert
-                    < reference_tick.saturating_add(DEFER_REPAIR_THRESHOLD_TICKS)
-                {
-                    return vec![];
+            if add_delay_after_first_shred {
+                // check delay time of last shred
+                if let Some(reference_tick) = slot_meta
+                    .received
+                    .checked_sub(1)
+                    .and_then(|index| blockstore.get_data_shred(slot, index).ok()?)
+                    .and_then(|shred| shred::layout::get_reference_tick(&shred).ok())
+                    .map(u64::from)
+                {
+                    // System time is not monotonic
+                    let ticks_since_first_insert = DEFAULT_TICKS_PER_SECOND
+                        * timestamp().saturating_sub(slot_meta.first_shred_timestamp)
+                        / 1_000;
+                    if ticks_since_first_insert
+                        < reference_tick.saturating_add(DEFER_REPAIR_THRESHOLD_TICKS)
+                    {
+                        return vec![];
+                    }
                 }
             }
         }
         vec![ShredRepairType::HighestShred(slot, slot_meta.received)]

Review thread on `add_delay_after_first_shred` (resolved): nit: maybe change name to … Reply: Done.
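For intuition about the check that `add_delay_after_first_shred` now gates: the wall-clock time elapsed since the first shred insert is converted to ticks and compared against the last shred's reference tick plus a grace threshold, deferring repair while the slot may still be arriving over turbine. A self-contained sketch of the arithmetic; DEFAULT_TICKS_PER_SECOND is 160 in solana-sdk, but the threshold value below is illustrative only:

```rust
const DEFAULT_TICKS_PER_SECOND: u64 = 160; // from solana-sdk
const DEFER_REPAIR_THRESHOLD_TICKS: u64 = 32; // illustrative, not the real constant

// Mirrors the deferment math in generate_repairs_for_slot.
fn should_defer(now_ms: u64, first_shred_ms: u64, reference_tick: u64) -> bool {
    // saturating_sub because system time is not monotonic.
    let ticks_since_first_insert =
        DEFAULT_TICKS_PER_SECOND * now_ms.saturating_sub(first_shred_ms) / 1_000;
    ticks_since_first_insert < reference_tick.saturating_add(DEFER_REPAIR_THRESHOLD_TICKS)
}

fn main() {
    // 100 ms after first insert, last shred at tick 4:
    // 160 * 100 / 1000 = 16 ticks elapsed < 4 + 32 = 36 -> still defer.
    assert!(should_defer(1_100, 1_000, 4));
    // 500 ms after: 80 ticks elapsed >= 36 -> repair now.
    assert!(!should_defer(1_500, 1_000, 4));
}
```

Wen restart repairs slots from before the outage, where waiting out a grace period buys nothing, which is presumably why the wen_restart path passes `false`.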
@@ -667,6 +679,7 @@ impl RepairService {
                 slot,
                 &slot_meta,
                 max_repairs - repairs.len(),
+                true,
             );
             repairs.extend(new_repairs);
             let next_slots = slot_meta.next_slots;

Review thread on the `true` argument (resolved): nit: could you add the param name in a comment like … Reply: Done.
@@ -677,6 +690,32 @@ impl RepairService {
         }
     }

+    pub(crate) fn generate_repairs_for_wen_restart(
+        blockstore: &Blockstore,
+        max_repairs: usize,
+        slots: &Vec<Slot>,
+    ) -> Vec<ShredRepairType> {
+        let mut result: Vec<ShredRepairType> = Vec::new();
+        for slot in slots {
+            if let Some(slot_meta) = blockstore.meta(*slot).unwrap() {
+                let new_repairs = Self::generate_repairs_for_slot(
+                    blockstore,
+                    *slot,
+                    &slot_meta,
+                    max_repairs - result.len(),
+                    false,
+                );
+                result.extend(new_repairs);
+            } else {
+                result.push(ShredRepairType::HighestShred(*slot, 0));
+            }
+            if result.len() >= max_repairs {
+                break;
+            }
+        }
+        result
+    }
+
     /// Generate repairs for all slots `x` in the repair_range.start <= x <= repair_range.end
     #[cfg(test)]
     pub fn generate_repairs_in_range(
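As a simplified model of the new helper's control flow: slots are visited in the caller's order, the output is capped at max_repairs, and a slot with no blockstore meta falls back to HighestShred(slot, 0), i.e. request everything from shred 0. The sketch below substitutes a mock lookup for Blockstore::meta and is not the real implementation (which delegates known slots to generate_repairs_for_slot and can emit several repairs per slot):

```rust
use std::collections::HashMap;

type Slot = u64;

#[derive(Debug, PartialEq)]
enum ShredRepairType {
    HighestShred(Slot, u64),
}

// `received` mocks Blockstore::meta: highest shred index received + 1.
fn sketch_repairs(
    received: &HashMap<Slot, u64>,
    max_repairs: usize,
    slots: &[Slot],
) -> Vec<ShredRepairType> {
    let mut result = Vec::new();
    for slot in slots {
        match received.get(slot) {
            // Known slot: ask for shreds past the highest received one.
            Some(recv) => result.push(ShredRepairType::HighestShred(*slot, *recv)),
            // Unknown slot: no meta yet, request everything from 0.
            None => result.push(ShredRepairType::HighestShred(*slot, 0)),
        }
        if result.len() >= max_repairs {
            break;
        }
    }
    result
}

fn main() {
    let received = HashMap::from([(2, 1), (3, 1)]);
    assert_eq!(
        sketch_repairs(&received, 2, &[2, 81, 3]),
        vec![
            ShredRepairType::HighestShred(2, 1),
            ShredRepairType::HighestShred(81, 0),
        ]
    );
}
```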
@@ -704,6 +743,7 @@ impl RepairService {
                 slot,
                 &meta,
                 max_repairs - repairs.len(),
+                true,
             );
             repairs.extend(new_repairs);
         }
@@ -726,6 +766,7 @@ impl RepairService {
                 slot,
                 &slot_meta,
                 MAX_REPAIR_PER_DUPLICATE,
+                true,
             ))
         }
     } else {
@@ -1348,4 +1389,64 @@ mod test {
         );
         assert_ne!(duplicate_status.repair_pubkey_and_addr, dummy_addr);
     }
+
+    #[test]
+    fn test_generate_repairs_for_wen_restart() {
+        solana_logger::setup();
+        let ledger_path = get_tmp_ledger_path_auto_delete!();
+        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+        let max_repairs = 3;
+
+        let slots: Vec<u64> = vec![2, 3, 5, 7];
+        let num_entries_per_slot = max_ticks_per_n_shreds(1, None) + 1;
+
+        let shreds = make_chaining_slot_entries(&slots, num_entries_per_slot);
+        for (mut slot_shreds, _) in shreds.into_iter() {
+            slot_shreds.remove(1);
+            blockstore.insert_shreds(slot_shreds, None, false).unwrap();
+        }
+        sleep_shred_deferment_period();
+
+        let mut slots_to_repair: Vec<Slot> = vec![];
+
+        // When slots_to_repair is empty, ignore all and return empty result.
+        let result = RepairService::generate_repairs_for_wen_restart(
+            &blockstore,
+            max_repairs,
+            &slots_to_repair,
+        );
+        assert!(result.is_empty());
+
+        // When asked to repair dead_slot and some unknown slot, return correct results.
+        slots_to_repair = vec![2, 81];
+        let result = RepairService::generate_repairs_for_wen_restart(
+            &blockstore,
+            max_repairs,
+            &slots_to_repair,
+        );
+        assert_eq!(
+            result,
+            vec![
+                ShredRepairType::HighestShred(2, 1),
+                ShredRepairType::HighestShred(81, 0),
+            ],
+        );
+
+        // Test that it will not generate more than max_repairs.
+        slots_to_repair = vec![3, 82, 5, 83, 7, 84];
+        let result = RepairService::generate_repairs_for_wen_restart(
+            &blockstore,
+            max_repairs,
+            &slots_to_repair,
+        );
+        assert_eq!(result.len(), max_repairs);
+        assert_eq!(
+            result,
+            vec![
+                ShredRepairType::HighestShred(3, 1),
+                ShredRepairType::HighestShred(82, 0),
+                ShredRepairType::HighestShred(5, 1),
+            ],
+        );
+    }
 }
Review thread on the RepairInfo field (resolved): suggestion: s/slots_to_repair_for_wen_restart/wen_restart_repair_slots/ for brevity. Reply: Done.