Implement standalone daemon for integration tests (#241)
* separate daemon

* clippy

* Remove unused

* temp commit

* fix sanity_integration_test

* Rename daemon to component manager

* Fix comment

* Don't init logger on DaemonWithRpc

* Add separate struct for runtime

* fmt

* clippy

* kos - fix trait markers for the latest rust update

* set resolver="1" in /Cargo.toml

* switch to resolver="2" in Cargo.toml

* address review comments

---------

Co-authored-by: Anton Yemelyanov <[email protected]>
someone235 and aspect authored Aug 25, 2023
1 parent 72c87c2 commit 0f40df9
Showing 33 changed files with 663 additions and 473 deletions.
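The change turns kaspad's startup path into reusable components so an integration test can boot a full node in-process instead of launching a binary. The sketch below only illustrates that pattern; all names (Runtime, Daemon, start, shutdown) are hypothetical stand-ins, not the API this PR actually introduces.

// Hypothetical sketch of the daemon-in-a-test pattern this PR enables.
// None of these names are taken from the real rusty-kaspa API.
struct Runtime; // process-wide context (directories, logger handle, ...)

struct Daemon {
    _runtime: Runtime,
}

impl Daemon {
    // Per the commit note "Don't init logger on DaemonWithRpc": construction
    // avoids global side effects so several daemons can coexist in one test binary.
    fn new(runtime: Runtime) -> Self {
        Daemon { _runtime: runtime }
    }
    fn start(&self) { /* spin up consensus, p2p and RPC components */ }
    fn shutdown(&self) { /* stop components in reverse order */ }
}

#[test]
fn sanity_integration_test() {
    let daemon = Daemon::new(Runtime);
    daemon.start();
    // ... drive the node over RPC and assert on the responses ...
    daemon.shutdown();
}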
14 changes: 14 additions & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

11 changes: 8 additions & 3 deletions Cargo.toml
@@ -1,4 +1,5 @@
 [workspace]
+resolver = "2"
 members = [
     "daemon",
     "cli",
@@ -49,7 +50,7 @@ members = [
     "testing/integration",
     "utils",
     "rothschild",
-    "metrics/perf_monitor"
+    "metrics/perf_monitor",
 ]

[workspace.package]
@@ -188,9 +189,13 @@ xxhash-rust = { version = "0.8.6", features = ["xxh3"] }
 rand_core = { version = "0.6", features = ["std"] }
 bs58 = { version = "0.4", features = ["check"], default-features = false }
 hmac = { version = "0.12", default-features = false }
-secp256k1 = { version = "0.24", features = ["global-context", "rand-std", "serde"] }
+secp256k1 = { version = "0.24", features = [
+    "global-context",
+    "rand-std",
+    "serde",
+] }
 #sha2 = {version = "0.10", default-features = false}
-zeroize = { version = "1", default-features = false, features=["alloc"] }
+zeroize = { version = "1", default-features = false, features = ["alloc"] }
 ripemd = { version = "0.1", default-features = false }
 subtle = { version = "2", default-features = false }
 once_cell = { version = "1" }
2 changes: 1 addition & 1 deletion cli/src/modules/wallet.rs
@@ -50,7 +50,7 @@ impl Wallet {
             }
             "hint" => {
                 if !argv.is_empty() {
-                    let re = regex::Regex::new(r#"wallet\s+hint\s+"#).unwrap();
+                    let re = regex::Regex::new(r"wallet\s+hint\s+").unwrap();
                     let hint = re.replace(cmd, "");
                     let hint = hint.trim();
                     let store = ctx.store();
3 changes: 3 additions & 0 deletions cli/src/notifier.rs
@@ -20,6 +20,9 @@ struct Inner {
     current: Mutex<Option<Element>>,
 }
 
+unsafe impl Send for Inner {}
+unsafe impl Sync for Inner {}
+
 #[derive(Clone)]
 pub struct Notifier {
     inner: Arc<Inner>,
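The added lines deserve a note: `unsafe impl Send`/`Sync` tells the compiler to treat `Inner` as thread-safe even though it holds a field (`Element`) whose thread safety the compiler cannot verify, and nothing at the use sites is checked. A minimal self-contained illustration of what such an assertion means, using a raw pointer as the stand-in non-thread-safe field:

use std::sync::{Arc, Mutex};

// Raw pointers are neither Send nor Sync, so `Inner` would not be either...
struct Handle(*const u8);

struct Inner {
    current: Mutex<Option<Handle>>,
}

// ...unless the author asserts it manually. The compiler trusts these two
// lines unchecked; they are sound only if every access really is synchronized
// (here, everything goes through the Mutex).
unsafe impl Send for Inner {}
unsafe impl Sync for Inner {}

fn main() {
    let inner = Arc::new(Inner { current: Mutex::new(None) });
    let handle = std::thread::spawn(move || {
        let guard = inner.current.lock().unwrap();
        assert!(guard.is_none());
    });
    handle.join().unwrap();
}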
3 changes: 0 additions & 3 deletions consensus/core/src/config/params.rs
@@ -268,8 +268,6 @@ impl From<NetworkId> for Params {
 
 pub const MAINNET_PARAMS: Params = Params {
     dns_seeders: &[
-        // This DNS seeder is run by Wolfie
-        "mainnet-dnsseed.kas.pa",
         // This DNS seeder is run by Denis Mashkevich
         "mainnet-dnsseed-1.kaspanet.org",
         // This DNS seeder is run by Denis Mashkevich
@@ -342,7 +340,6 @@ pub const MAINNET_PARAMS: Params = Params {
 
 pub const TESTNET_PARAMS: Params = Params {
     dns_seeders: &[
-        "testnet-10-dnsseed.kas.pa",
         // This DNS seeder is run by Tiram
         "seeder1-testnet.kaspad.net",
     ],
4 changes: 1 addition & 3 deletions consensus/core/src/tx/script_public_key.rs
@@ -221,9 +221,7 @@ impl<'de: 'a, 'a> Deserialize<'de> for ScriptPublicKey {
 }
 impl From<Value<'_>> for u16 {
     fn from(value: Value<'_>) -> Self {
-        let Value::U16(v) = value else {
-            panic!("unexpected conversion: {value:?}")
-        };
+        let Value::U16(v) = value else { panic!("unexpected conversion: {value:?}") };
         v
     }
 }
12 changes: 9 additions & 3 deletions consensus/src/consensus/mod.rs
@@ -382,8 +382,12 @@ impl ConsensusApi for Consensus {
     fn get_virtual_merge_depth_root(&self) -> Option<Hash> {
         // TODO: consider saving the merge depth root as part of virtual state
         // TODO: unwrap on pruning_point and virtual state reads when staging consensus is implemented
-        let Some(pruning_point) = self.pruning_point_store.read().pruning_point().unwrap_option() else { return None; };
-        let Some(virtual_state) = self.virtual_stores.read().state.get().unwrap_option() else { return None; };
+        let Some(pruning_point) = self.pruning_point_store.read().pruning_point().unwrap_option() else {
+            return None;
+        };
+        let Some(virtual_state) = self.virtual_stores.read().state.get().unwrap_option() else {
+            return None;
+        };
         let virtual_ghostdag_data = &virtual_state.ghostdag_data;
         let root = self.services.depth_manager.calc_merge_depth_root(virtual_ghostdag_data, pruning_point);
         if root.is_origin() {
@@ -685,7 +689,9 @@ impl ConsensusApi for Consensus {
             // to k blocks back and then we would be able to safely unwrap here. For now we
             // just break the loop, since if the data was truly missing we wouldn't accept
             // the staging consensus in the first place
-            let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else { break; };
+            let Some(parent) = self.ghostdag_primary_store.get_selected_parent(current).unwrap_option() else {
+                break;
+            };
             current = parent;
         }
         Ok(hashes)
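Most of the churn in this file is rustfmt re-wrapping `let ... else` bindings. For readers unfamiliar with the construct: it binds a pattern on success and must diverge (`return`, `break`, `continue`, or panic) on failure, which is exactly how the hunks above use it. A minimal standalone example:

fn first_even(values: &[u32]) -> Option<u32> {
    // Binds `first` if an even element exists; otherwise the `else` arm
    // must diverge, here via an early return.
    let Some(&first) = values.iter().find(|v| *v % 2 == 0) else {
        return None;
    };
    Some(first * 10)
}

fn main() {
    assert_eq!(first_even(&[1, 4, 5]), Some(40));
    assert_eq!(first_even(&[1, 3, 5]), None);
}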
4 changes: 2 additions & 2 deletions consensus/src/model/stores/relations.rs
@@ -53,8 +53,8 @@ impl DbRelationsStore {
     }
 
     pub fn with_prefix(db: Arc<DB>, prefix: &[u8], cache_size: u64) -> Self {
-        let parents_prefix = prefix.iter().copied().chain(DatabaseStorePrefixes::RelationsParents.into_iter()).collect_vec();
-        let children_prefix = prefix.iter().copied().chain(DatabaseStorePrefixes::RelationsChildren.into_iter()).collect_vec();
+        let parents_prefix = prefix.iter().copied().chain(DatabaseStorePrefixes::RelationsParents).collect_vec();
+        let children_prefix = prefix.iter().copied().chain(DatabaseStorePrefixes::RelationsChildren).collect_vec();
         Self {
             db: Arc::clone(&db),
             parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size, parents_prefix),
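This two-line change works because `Iterator::chain` accepts any `IntoIterator`, so the explicit `.into_iter()` on the prefix values was redundant (clippy flags the pattern). A small demonstration with plain byte containers:

fn main() {
    let prefix = [0xAAu8, 0xBB];
    let suffix = vec![0x01u8, 0x02];

    // `chain` takes `impl IntoIterator`, so both forms are equivalent;
    // the second is what clippy suggests.
    let explicit: Vec<u8> = prefix.iter().copied().chain(suffix.clone().into_iter()).collect();
    let implicit: Vec<u8> = prefix.iter().copied().chain(suffix).collect();
    assert_eq!(explicit, implicit);
}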
4 changes: 3 additions & 1 deletion consensus/src/pipeline/header_processor/processor.rs
@@ -212,7 +212,9 @@ impl HeaderProcessor {
     pub fn worker(self: &Arc<HeaderProcessor>) {
         while let Ok(msg) = self.receiver.recv() {
             match msg {
-                BlockProcessingMessage::Exit => break,
+                BlockProcessingMessage::Exit => {
+                    break;
+                }
                 BlockProcessingMessage::Process(task, result_transmitter) => {
                     if let Some(task_id) = self.task_manager.register(task, result_transmitter) {
                         let processor = self.clone();
8 changes: 6 additions & 2 deletions consensus/src/pipeline/pruning_processor/processor.rs
@@ -108,7 +108,9 @@ impl PruningProcessor {
     }
 
     pub fn worker(self: &Arc<Self>) {
-        let Ok(PruningProcessingMessage::Process { sink_ghostdag_data }) = self.receiver.recv() else { return; };
+        let Ok(PruningProcessingMessage::Process { sink_ghostdag_data }) = self.receiver.recv() else {
+            return;
+        };
 
         // On start-up, check if any pruning workflows require recovery. We wait for the first processing message to arrive
         // in order to make sure the node is already connected and receiving blocks before we start background recovery operations
@@ -258,7 +260,9 @@ impl PruningProcessor {
         let mut counter = 0;
         let mut batch = WriteBatch::default();
         for kept in keep_relations.iter().copied() {
-            let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { continue; };
+            let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else {
+                continue;
+            };
             if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains(&h)) {
                 let mut mutable_ghostdag: ExternalGhostdagData = ghostdag.as_ref().into();
                 mutable_ghostdag.mergeset_blues.retain(|h| keep_relations.contains(h));
8 changes: 6 additions & 2 deletions consensus/src/processes/sync/mod.rs
@@ -170,15 +170,19 @@ impl<
         let mut backward_iterator = self.reachability_service.backward_chain_iterator(high, pp, true);
         loop {
             // We loop from both directions in parallel in order to use the shorter path
-            let Some((parent, current)) = forward_iterator.next() else { break; };
+            let Some((parent, current)) = forward_iterator.next() else {
+                break;
+            };
             let status = self.statuses_store.read().get(current).unwrap();
             if status.is_header_only() {
                 // Going up, the first parent which has a header-only child is our target
                 highest_with_body = Some(parent);
                 break;
             }
 
-            let Some(backward_current) = backward_iterator.next() else { break; };
+            let Some(backward_current) = backward_iterator.next() else {
+                break;
+            };
             let status = self.statuses_store.read().get(backward_current).unwrap();
             if status.has_block_body() {
                 // Since this iterator is going down, current must be the highest with body
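The comment in this hunk is the interesting part: the code walks the chain from both endpoints at once and stops as soon as either direction hits its target, so the cost is proportional to the shorter of the two distances. A simplified sketch of that alternating-iterator pattern:

// Search a slice for `target` from both ends simultaneously; whichever
// direction hits first ends the loop, mirroring the sync code's
// forward/backward chain iterators.
fn find_from_both_ends(items: &[u32], target: u32) -> Option<usize> {
    let mut forward = items.iter().enumerate();
    let mut backward = items.iter().enumerate().rev();
    loop {
        let Some((i, &v)) = forward.next() else { break };
        if v == target {
            return Some(i);
        }
        let Some((j, &w)) = backward.next() else { break };
        if w == target {
            return Some(j);
        }
    }
    None
}

fn main() {
    assert_eq!(find_from_both_ends(&[5, 1, 9, 7, 3], 7), Some(3));
    assert_eq!(find_from_both_ends(&[5, 1, 9], 2), None);
}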
@@ -568,7 +568,7 @@ mod tests {
         let secp = Secp256k1::new();
         let (secret_key, public_key) = secp.generate_keypair(&mut rand::thread_rng());
         let (public_key, _) = public_key.x_only_public_key();
-        let script_pub_key = once(0x20).chain(public_key.serialize().into_iter()).chain(once(0xac)).collect_vec();
+        let script_pub_key = once(0x20).chain(public_key.serialize()).chain(once(0xac)).collect_vec();
         let script_pub_key = ScriptVec::from_slice(&script_pub_key);
 
         let prev_tx_id = TransactionId::from_str("880eb9819a31821d9d2399e2f35e2433b72637e393d71ecc9b8d0250f49153c3").unwrap();
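Context for the reformatted test line above: the bytes appear to assemble a pay-to-pubkey script, `0x20` (a 32-byte data push) followed by the x-only public key and `0xac` (`OpCheckSig`). A dependency-free sketch of the same construction, using a dummy key for illustration:

use std::iter::once;

fn main() {
    // Dummy 32-byte x-only public key, for illustration only.
    let public_key = [0x01u8; 32];

    // 0x20 = push the next 32 bytes, 0xac = OpCheckSig.
    let script_pub_key: Vec<u8> = once(0x20).chain(public_key).chain(once(0xac)).collect();

    assert_eq!(script_pub_key.len(), 34);
    assert_eq!(&script_pub_key[1..33], &public_key);
}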
5 changes: 4 additions & 1 deletion kaspad/Cargo.toml
@@ -7,6 +7,9 @@ authors.workspace = true
 include.workspace = true
 license.workspace = true
 
+[lib]
+crate-type = ["cdylib", "lib"]
+
 [dependencies]
 kaspa-hashes.workspace = true
 kaspa-utils.workspace = true
@@ -42,7 +45,7 @@ workflow-log.workspace = true
 dirs = "4.0"
 num_cpus.workspace = true
 
-dhat = {version = "0.3.2", optional = true}
+dhat = { version = "0.3.2", optional = true }
 
 [features]
 heap = ["dhat"]